From 5bea35027b1f4f4568570ffab0b5027bd027529d Mon Sep 17 00:00:00 2001 From: Vishesh Date: Thu, 17 Oct 2024 16:29:16 +0530 Subject: [PATCH 01/22] Improve logging to include more identifiable information for kvm plugin --- .../main/java/com/cloud/storage/VolumeVO.java | 2 +- .../engine/cloud/entity/api/db/VMEntityVO.java | 3 ++- .../storage/datastore/db/StoragePoolVO.java | 3 ++- .../main/java/com/cloud/ha/KVMInvestigator.java | 15 +++++++-------- .../apache/cloudstack/kvm/ha/KVMHAProvider.java | 10 +++++----- .../kvm/ha/KVMHostActivityChecker.java | 16 ++++++++-------- 6 files changed, 25 insertions(+), 24 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java b/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java index ea57ef91237a..bb1a3b306595 100644 --- a/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java @@ -514,7 +514,7 @@ public void setUpdated(Date updated) { @Override public String toString() { - return new StringBuilder("Vol[").append(id).append("|name=").append(name).append("|vm=").append(instanceId).append("|").append(volumeType).append("]").toString(); + return String.format("StoragePool %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "name", "uuid", "volumeType", "instanceId")); } @Override diff --git a/engine/schema/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/db/VMEntityVO.java b/engine/schema/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/db/VMEntityVO.java index af48e5e04acd..a1627ee9cd43 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/db/VMEntityVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/db/VMEntityVO.java @@ -50,6 +50,7 @@ import com.cloud.vm.VirtualMachine.State; import com.google.gson.Gson; import org.apache.cloudstack.util.HypervisorTypeConverter; +import 
org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "vm_instance") @@ -460,7 +461,7 @@ public void setDetails(Map details) { @Override public String toString() { if (toString == null) { - toString = new StringBuilder("VM[").append(type.toString()).append("|").append(hostName).append("]").toString(); + toString = String.format("VM %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "instanceName", "uuid", "type")); } return toString; } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java index 707091adb873..eebddc166b56 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java @@ -26,6 +26,7 @@ import com.cloud.utils.db.Encrypt; import com.cloud.utils.db.GenericDao; import org.apache.cloudstack.util.HypervisorTypeConverter; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import javax.persistence.Convert; @@ -370,7 +371,7 @@ public int hashCode() { @Override public String toString() { - return new StringBuilder("Pool[").append(id).append("|").append(poolType).append("]").toString(); + return String.format("StoragePool %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "name", "uuid", "poolType")); } @Override diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/ha/KVMInvestigator.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/ha/KVMInvestigator.java index 8fc748262424..eb64f4bc4397 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/ha/KVMInvestigator.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/ha/KVMInvestigator.java @@ -60,7 +60,7 @@ public boolean 
isVmAlive(com.cloud.vm.VirtualMachine vm, Host host) throws Unkno return haManager.isVMAliveOnHost(host); } Status status = isAgentAlive(host); - logger.debug("HA: HOST is ineligible legacy state " + status + " for host " + host.getId()); + logger.debug("HA: HOST is ineligible legacy state {} for host {}", status, host); if (status == null) { throw new UnknownVM(); } @@ -88,8 +88,7 @@ public Status isAgentAlive(Host agent) { storageSupportHA = storageSupportHa(zonePools); } if (!storageSupportHA) { - logger.warn( - "Agent investigation was requested on host " + agent + ", but host does not support investigation because it has no NFS storage. Skipping investigation."); + logger.warn("Agent investigation was requested on host {}, but host does not support investigation because it has no NFS storage. Skipping investigation.", agent); return Status.Disconnected; } @@ -104,7 +103,7 @@ public Status isAgentAlive(Host agent) { hostStatus = answer.getResult() ? Status.Down : Status.Up; } } catch (Exception e) { - logger.debug("Failed to send command to host: " + agent.getId()); + logger.debug("Failed to send command to host: {}", agent); } if (hostStatus == null) { hostStatus = Status.Disconnected; @@ -116,18 +115,18 @@ public Status isAgentAlive(Host agent) { || (neighbor.getHypervisorType() != Hypervisor.HypervisorType.KVM && neighbor.getHypervisorType() != Hypervisor.HypervisorType.LXC)) { continue; } - logger.debug("Investigating host:" + agent.getId() + " via neighbouring host:" + neighbor.getId()); + logger.debug("Investigating host:{} via neighbouring host:{}", agent, neighbor); try { Answer answer = _agentMgr.easySend(neighbor.getId(), cmd); if (answer != null) { neighbourStatus = answer.getResult() ? 
Status.Down : Status.Up; - logger.debug("Neighbouring host:" + neighbor.getId() + " returned status:" + neighbourStatus + " for the investigated host:" + agent.getId()); + logger.debug("Neighbouring host:{} returned status:{} for the investigated host:{}", neighbor, neighbourStatus, agent); if (neighbourStatus == Status.Up) { break; } } } catch (Exception e) { - logger.debug("Failed to send command to host: " + neighbor.getId()); + logger.debug("Failed to send command to host: {}", neighbor); } } if (neighbourStatus == Status.Up && (hostStatus == Status.Disconnected || hostStatus == Status.Down)) { @@ -136,7 +135,7 @@ public Status isAgentAlive(Host agent) { if (neighbourStatus == Status.Down && (hostStatus == Status.Disconnected || hostStatus == Status.Down)) { hostStatus = Status.Down; } - logger.debug("HA: HOST is ineligible legacy state " + hostStatus + " for host " + agent.getId()); + logger.debug("HA: HOST is ineligible legacy state {} for host {}", hostStatus, agent); return hostStatus; } diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHAProvider.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHAProvider.java index 81daabf59d76..0e7a6f233b03 100644 --- a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHAProvider.java +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHAProvider.java @@ -73,12 +73,12 @@ public boolean recover(Host r) throws HARecoveryException { final OutOfBandManagementResponse resp = outOfBandManagementService.executePowerOperation(r, PowerOperation.RESET, null); return resp.getSuccess(); } else { - logger.warn("OOBM recover operation failed for the host " + r.getName()); + logger.warn("OOBM recover operation failed for the host {}", r); return false; } } catch (Exception e){ - logger.warn("OOBM service is not configured or enabled for this host " + r.getName() + " error is " + e.getMessage()); - throw new HARecoveryException(" 
OOBM service is not configured or enabled for this host " + r.getName(), e); + logger.warn("OOBM service is not configured or enabled for this host {} error is {}", r, e.getMessage()); + throw new HARecoveryException(" OOBM service is not configured or enabled for this host " + r, e); } } @@ -90,11 +90,11 @@ public boolean fence(Host r) throws HAFenceException { final OutOfBandManagementResponse resp = outOfBandManagementService.executePowerOperation(r, PowerOperation.OFF, null); return resp.getSuccess(); } else { - logger.warn("OOBM fence operation failed for this host " + r.getName()); + logger.warn("OOBM fence operation failed for this host {}", r); return false; } } catch (Exception e){ - logger.warn("OOBM service is not configured or enabled for this host " + r.getName() + " error is " + e.getMessage()); + logger.warn("OOBM service is not configured or enabled for this host {} error is {}", r, e.getMessage()); throw new HAFenceException("OBM service is not configured or enabled for this host " + r.getName() , e); } } diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java index 10d684bbdd32..eec2b26ebb63 100644 --- a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java @@ -155,7 +155,7 @@ private boolean isVMActivtyOnHost(Host agent, DateTime suspectTime) throws HAChe for (StoragePool pool : poolVolMap.keySet()) { activityStatus = verifyActivityOfStorageOnHost(poolVolMap, pool, agent, suspectTime, activityStatus); if (!activityStatus) { - logger.warn(String.format("It seems that the storage pool [%s] does not have activity on %s.", pool.getId(), agent.toString())); + logger.warn("It seems that the storage pool [{}] does not have activity on {}.", pool, agent.toString()); 
break; } } @@ -167,20 +167,20 @@ protected boolean verifyActivityOfStorageOnHost(HashMap volume_list = poolVolMap.get(pool); final CheckVMActivityOnStoragePoolCommand cmd = new CheckVMActivityOnStoragePoolCommand(agent, pool, volume_list, suspectTime); - logger.debug(String.format("Checking VM activity for %s on storage pool [%s].", agent.toString(), pool.getId())); + logger.debug("Checking VM activity for {} on storage pool [{}].", agent.toString(), pool); try { Answer answer = storageManager.sendToPool(pool, getNeighbors(agent), cmd); if (answer != null) { activityStatus = !answer.getResult(); - logger.debug(String.format("%s %s activity on storage pool [%s]", agent.toString(), activityStatus ? "has" : "does not have", pool.getId())); + logger.debug("{} {} activity on storage pool [{}]", agent.toString(), activityStatus ? "has" : "does not have", pool); } else { - String message = String.format("Did not get a valid response for VM activity check for %s on storage pool [%s].", agent.toString(), pool.getId()); + String message = String.format("Did not get a valid response for VM activity check for %s on storage pool [%s].", agent.toString(), pool); logger.debug(message); throw new IllegalStateException(message); } } catch (StorageUnavailableException e){ - String message = String.format("Storage [%s] is unavailable to do the check, probably the %s is not reachable.", pool.getId(), agent.toString()); + String message = String.format("Storage [%s] is unavailable to do the check, probably the %s is not reachable.", pool, agent.toString()); logger.warn(message, e); throw new HACheckerException(message, e); } @@ -191,15 +191,15 @@ private HashMap> getVolumeUuidOnHost(Host agent) { List vm_list = vmInstanceDao.listByHostId(agent.getId()); List volume_list = new ArrayList(); for (VirtualMachine vm : vm_list) { - logger.debug(String.format("Retrieving volumes of VM [%s]...", vm.getId())); + logger.debug("Retrieving volumes of VM [{}]...", vm); List vm_volume_list = 
volumeDao.findByInstance(vm.getId()); volume_list.addAll(vm_volume_list); } HashMap> poolVolMap = new HashMap>(); for (Volume vol : volume_list) { - logger.debug(String.format("Retrieving storage pool [%s] of volume [%s]...", vol.getPoolId(), vol.getId())); StoragePool sp = storagePool.findById(vol.getPoolId()); + logger.debug("Retrieving storage pool [{}] of volume [{}]...", sp, vol); if (!poolVolMap.containsKey(sp)) { List list = new ArrayList(); list.add(vol); @@ -215,7 +215,7 @@ private HashMap> getVolumeUuidOnHost(Host agent) { public long[] getNeighbors(Host agent) { List neighbors = new ArrayList(); List cluster_hosts = resourceManager.listHostsInClusterByStatus(agent.getClusterId(), Status.Up); - logger.debug(String.format("Retrieving all \"Up\" hosts from cluster [%s]...", agent.getClusterId())); + logger.debug("Retrieving all \"Up\" hosts from cluster [{}]...", agent.getClusterId()); for (HostVO host : cluster_hosts) { if (host.getId() == agent.getId() || (host.getHypervisorType() != Hypervisor.HypervisorType.KVM && host.getHypervisorType() != Hypervisor.HypervisorType.LXC)) { continue; From e38876453f727ebae403bc61f465897e799aa8ef Mon Sep 17 00:00:00 2001 From: Vishesh Date: Fri, 18 Oct 2024 12:14:33 +0530 Subject: [PATCH 02/22] Update logging for scaleio plugin --- .../main/java/com/cloud/storage/VolumeVO.java | 5 +- .../storage/snapshot/SnapshotObject.java | 8 ++ .../vmsnapshot/ScaleIOVMSnapshotStrategy.java | 23 ++-- .../storage/helper/VMSnapshotHelperImpl.java | 4 +- .../storage/vmsnapshot/VMSnapshotHelper.java | 3 +- .../datastore/PrimaryDataStoreImpl.java | 3 +- .../ScaleIOGatewayClientConnectionPool.java | 34 ++++-- .../driver/ScaleIOPrimaryDataStoreDriver.java | 110 ++++++++++-------- .../ScaleIOPrimaryDataStoreLifeCycle.java | 16 +-- .../manager/ScaleIOSDCManagerImpl.java | 55 ++++++--- .../provider/ScaleIOHostListener.java | 20 ++-- .../ScaleIOPrimaryDataStoreDriverTest.java | 12 +- .../ScaleIOPrimaryDataStoreLifeCycleTest.java | 2 +- 13 files 
changed, 182 insertions(+), 113 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java b/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java index bb1a3b306595..45b6bd97c728 100644 --- a/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java @@ -514,7 +514,10 @@ public void setUpdated(Date updated) { @Override public String toString() { - return String.format("StoragePool %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "name", "uuid", "volumeType", "instanceId")); + return String.format("Volume %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "name", + "uuid", "volumeType", "instanceId", "path")); } @Override diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java index 961a647d7a8c..ce21a723b243 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java @@ -41,6 +41,8 @@ import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.to.SnapshotObjectTO; +import org.apache.commons.lang3.builder.ToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -466,4 +468,10 @@ public boolean delete() { public Class getEntityType() { return Snapshot.class; } + + @Override + public String toString() { + return String.format("Snapshot %s", new ToStringBuilder(this, ToStringStyle.JSON_STYLE).append("uuid", getUuid()).append("name", getName()) + .append("volumeId", getVolumeId()).append("path", 
getPath()).toString()); + } } diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java index d27beecfddac..11786e4f0ab8 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java @@ -26,6 +26,7 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.storage.StoragePool; import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; import org.apache.cloudstack.engine.subsystem.api.storage.VMSnapshotStrategy; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -162,8 +163,7 @@ public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { Map srcVolumeDestSnapshotMap = new HashMap<>(); List volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId()); - final Long storagePoolId = vmSnapshotHelper.getStoragePoolForVM(userVm.getId()); - StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + StoragePoolVO storagePool = vmSnapshotHelper.getStoragePoolForVM(userVm.getId()); long prev_chain_size = 0; long virtual_size=0; for (VolumeObjectTO volume : volumeTOs) { @@ -188,7 +188,7 @@ public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { vmSnapshotVO.setParent(current.getId()); try { - final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + final ScaleIOGatewayClient client = getScaleIOClient(storagePool); SnapshotGroup snapshotGroup = client.takeSnapshot(srcVolumeDestSnapshotMap); if (snapshotGroup == null) { throw new CloudRuntimeException("Failed to take VM snapshot on PowerFlex storage pool"); @@ -291,7 +291,8 @@ public boolean revertVMSnapshot(VMSnapshot vmSnapshot) { boolean result = false; try { List volumeTOs = 
vmSnapshotHelper.getVolumeTOList(userVm.getId()); - Long storagePoolId = vmSnapshotHelper.getStoragePoolForVM(userVm.getId()); + StoragePoolVO storagePool = vmSnapshotHelper.getStoragePoolForVM(userVm.getId()); + Long storagePoolId = storagePool.getId(); Map srcSnapshotDestVolumeMap = new HashMap<>(); for (VolumeObjectTO volume : volumeTOs) { VMSnapshotDetailsVO vmSnapshotDetail = vmSnapshotDetailsDao.findDetail(vmSnapshotVO.getId(), "Vol_" + volume.getId() + "_Snapshot"); @@ -305,7 +306,7 @@ public boolean revertVMSnapshot(VMSnapshot vmSnapshot) { throw new CloudRuntimeException("Failed to get the system id for PowerFlex storage pool for reverting VM snapshot: " + vmSnapshot.getName()); } - final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + final ScaleIOGatewayClient client = getScaleIOClient(storagePool); result = client.revertSnapshot(systemId, srcSnapshotDestVolumeMap); if (!result) { throw new CloudRuntimeException("Failed to revert VM snapshot on PowerFlex storage pool"); @@ -314,7 +315,7 @@ public boolean revertVMSnapshot(VMSnapshot vmSnapshot) { finalizeRevert(vmSnapshotVO, volumeTOs); result = true; } catch (Exception e) { - String errMsg = "Revert VM: " + userVm.getInstanceName() + " to snapshot: " + vmSnapshotVO.getName() + " failed due to " + e.getMessage(); + String errMsg = String.format("Revert VM: %s to snapshot: %s failed due to %s", userVm, vmSnapshotVO, e.getMessage()); logger.error(errMsg, e); throw new CloudRuntimeException(errMsg); } finally { @@ -378,8 +379,8 @@ public boolean deleteVMSnapshot(VMSnapshot vmSnapshot) { try { List volumeTOs = vmSnapshotHelper.getVolumeTOList(vmSnapshot.getVmId()); - Long storagePoolId = vmSnapshotHelper.getStoragePoolForVM(userVm.getId()); - String systemId = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID).getValue(); + StoragePoolVO storagePool = vmSnapshotHelper.getStoragePoolForVM(userVm.getId()); + String systemId = 
storagePoolDetailsDao.findDetail(storagePool.getId(), ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID).getValue(); if (systemId == null) { throw new CloudRuntimeException("Failed to get the system id for PowerFlex storage pool for deleting VM snapshot: " + vmSnapshot.getName()); } @@ -390,7 +391,7 @@ public boolean deleteVMSnapshot(VMSnapshot vmSnapshot) { } String snapshotGroupId = vmSnapshotDetailsVO.getValue(); - final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + final ScaleIOGatewayClient client = getScaleIOClient(storagePool); int volumesDeleted = client.deleteSnapshotGroup(systemId, snapshotGroupId); if (volumesDeleted <= 0) { throw new CloudRuntimeException("Failed to delete VM snapshot: " + vmSnapshot.getName()); @@ -509,7 +510,7 @@ private void publishUsageEvent(String type, VMSnapshot vmSnapshot, UserVm userVm } } - private ScaleIOGatewayClient getScaleIOClient(final Long storagePoolId) throws Exception { - return ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePoolId, storagePoolDetailsDao); + private ScaleIOGatewayClient getScaleIOClient(final StoragePool storagePool) throws Exception { + return ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePool, storagePoolDetailsDao); } } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java index 01842441e269..e582ae6b4c64 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java @@ -150,7 +150,7 @@ public VMSnapshotTO getSnapshotWithParents(VMSnapshotVO snapshot) { } @Override - public Long getStoragePoolForVM(Long vmId) { + public StoragePoolVO getStoragePoolForVM(Long vmId) { List rootVolumes = volumeDao.findReadyRootVolumesByInstance(vmId); if (rootVolumes == null || 
rootVolumes.isEmpty()) { throw new InvalidParameterValueException("Failed to find root volume for the user vm:" + vmId); @@ -166,7 +166,7 @@ public Long getStoragePoolForVM(Long vmId) { throw new InvalidParameterValueException("Storage pool for the user vm:" + vmId + " is in maintenance"); } - return rootVolumePool.getId(); + return rootVolumePool; } @Override diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java index 35153a109961..6467072b1b3e 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java @@ -20,6 +20,7 @@ import java.util.List; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import com.cloud.agent.api.VMSnapshotTO; @@ -37,7 +38,7 @@ public interface VMSnapshotHelper { VMSnapshotTO getSnapshotWithParents(VMSnapshotVO snapshot); - Long getStoragePoolForVM(Long vmId); + StoragePoolVO getStoragePoolForVM(Long vmId); Storage.StoragePoolType getStoragePoolType(Long poolId); } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java index 7f373fa9988c..cdf37c5fc9e4 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java @@ -469,6 +469,7 @@ public StoragePoolType getParentPoolType() { @Override public String toString() { - return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "name", "uuid"); + return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, 
+ "id", "name", "uuid"); } } diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientConnectionPool.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientConnectionPool.java index a9dc8b42cd5a..e605b159c991 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientConnectionPool.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientConnectionPool.java @@ -22,6 +22,8 @@ import java.security.NoSuchAlgorithmException; import java.util.concurrent.ConcurrentHashMap; +import com.cloud.storage.StoragePool; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -49,9 +51,26 @@ private ScaleIOGatewayClientConnectionPool() { gatewayClients = new ConcurrentHashMap(); } - public ScaleIOGatewayClient getClient(Long storagePoolId, StoragePoolDetailsDao storagePoolDetailsDao) + public ScaleIOGatewayClient getClient(StoragePool storagePool, + StoragePoolDetailsDao storagePoolDetailsDao) throws NoSuchAlgorithmException, KeyManagementException, URISyntaxException { - Preconditions.checkArgument(storagePoolId != null && storagePoolId > 0, "Invalid storage pool id"); + return getClient(storagePool.getId(), storagePool.getUuid(), storagePoolDetailsDao); + } + + + public ScaleIOGatewayClient getClient(DataStore dataStore, + StoragePoolDetailsDao storagePoolDetailsDao) + throws NoSuchAlgorithmException, KeyManagementException, URISyntaxException { + return getClient(dataStore.getId(), dataStore.getUuid(), storagePoolDetailsDao); + } + + + private ScaleIOGatewayClient getClient(Long storagePoolId, String storagePoolUuid, + StoragePoolDetailsDao 
storagePoolDetailsDao) + throws NoSuchAlgorithmException, KeyManagementException, URISyntaxException { + + Preconditions.checkArgument(storagePoolId != null && storagePoolId > 0, + "Invalid storage pool id"); ScaleIOGatewayClient client = null; synchronized (gatewayClients) { @@ -67,23 +86,24 @@ public ScaleIOGatewayClient getClient(Long storagePoolId, StoragePoolDetailsDao client = new ScaleIOGatewayClientImpl(url, username, password, false, clientTimeout, clientMaxConnections); gatewayClients.put(storagePoolId, client); - logger.debug("Added gateway client for the storage pool: " + storagePoolId); + logger.debug("Added gateway client for the storage pool [id: {}, uuid: {}]", storagePoolId, storagePoolUuid); } } return client; } - public boolean removeClient(Long storagePoolId) { - Preconditions.checkArgument(storagePoolId != null && storagePoolId > 0, "Invalid storage pool id"); + public boolean removeClient(DataStore dataStore) { + Preconditions.checkArgument(dataStore != null && dataStore.getId() > 0, + "Invalid storage pool id"); ScaleIOGatewayClient client = null; synchronized (gatewayClients) { - client = gatewayClients.remove(storagePoolId); + client = gatewayClients.remove(dataStore.getId()); } if (client != null) { - logger.debug("Removed gateway client for the storage pool: " + storagePoolId); + logger.debug("Removed gateway client for the storage pool: {}", dataStore); return true; } diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java index 8044e787bd2d..63843661f02c 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java @@ 
-151,8 +151,12 @@ public ScaleIOPrimaryDataStoreDriver() { sdcManager = new ScaleIOSDCManagerImpl(); } - public ScaleIOGatewayClient getScaleIOClient(final Long storagePoolId) throws Exception { - return ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePoolId, storagePoolDetailsDao); + ScaleIOGatewayClient getScaleIOClient(final StoragePool storagePool) throws Exception { + return ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePool, storagePoolDetailsDao); + } + + ScaleIOGatewayClient getScaleIOClient(final DataStore dataStore) throws Exception { + return ScaleIOGatewayClientConnectionPool.getInstance().getClient(dataStore, storagePoolDetailsDao); } private boolean setVolumeLimitsOnSDC(VolumeVO volume, Host host, DataStore dataStore, Long iopsLimit, Long bandwidthLimitInKbps) throws Exception { @@ -160,10 +164,10 @@ private boolean setVolumeLimitsOnSDC(VolumeVO volume, Host host, DataStore dataS final String sdcId = sdcManager.prepareSDC(host, dataStore); if (StringUtils.isBlank(sdcId)) { alertHostSdcDisconnection(host); - throw new CloudRuntimeException("Unable to grant access to volume: " + volume.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress()); + throw new CloudRuntimeException("Unable to grant access to volume: " + volume + ", no Sdc connected with host ip: " + host.getPrivateIpAddress()); } - final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + final ScaleIOGatewayClient client = getScaleIOClient(dataStore); return client.mapVolumeToSdcWithLimits(ScaleIOUtil.getVolumePath(volume.getPath()), sdcId, iopsLimit, bandwidthLimitInKbps); } @@ -197,22 +201,25 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore final String sdcId = sdcManager.prepareSDC(host, dataStore); if (StringUtils.isBlank(sdcId)) { alertHostSdcDisconnection(host); - throw new CloudRuntimeException(String.format("Unable to grant access to %s: %s, no Sdc connected with host ip: %s", 
dataObject.getType(), dataObject.getId(), host.getPrivateIpAddress())); + throw new CloudRuntimeException(String.format( + "Unable to grant access to %s: [id: %d, uuid: %s], no Sdc connected with host ip: %s", + dataObject.getType(), dataObject.getId(), + dataObject.getUuid(), host.getPrivateIpAddress())); } if (DataObjectType.VOLUME.equals(dataObject.getType())) { final VolumeVO volume = volumeDao.findById(dataObject.getId()); - logger.debug("Granting access for PowerFlex volume: " + volume.getPath()); + logger.debug("Granting access for PowerFlex volume: {} at path {}", volume, volume.getPath()); return setVolumeLimitsFromDetails(volume, host, dataStore); } else if (DataObjectType.TEMPLATE.equals(dataObject.getType())) { final VMTemplateStoragePoolVO templatePoolRef = vmTemplatePoolDao.findByPoolTemplate(dataStore.getId(), dataObject.getId(), null); - logger.debug("Granting access for PowerFlex template volume: " + templatePoolRef.getInstallPath()); - final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + logger.debug("Granting access for PowerFlex template volume: {}", templatePoolRef.getInstallPath()); + final ScaleIOGatewayClient client = getScaleIOClient(dataStore); return client.mapVolumeToSdc(ScaleIOUtil.getVolumePath(templatePoolRef.getInstallPath()), sdcId); } else if (DataObjectType.SNAPSHOT.equals(dataObject.getType())) { SnapshotInfo snapshot = (SnapshotInfo) dataObject; - logger.debug("Granting access for PowerFlex volume snapshot: " + snapshot.getPath()); - final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + logger.debug("Granting access for PowerFlex volume snapshot: {} at path {}", snapshot, snapshot.getPath()); + final ScaleIOGatewayClient client = getScaleIOClient(dataStore); return client.mapVolumeToSdc(ScaleIOUtil.getVolumePath(snapshot.getPath()), sdcId); } @@ -235,23 +242,26 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) } try { - final String sdcId = 
getConnectedSdc(dataStore.getId(), host.getId()); + final String sdcId = getConnectedSdc(dataStore, host); if (StringUtils.isBlank(sdcId)) { - logger.warn(String.format("Unable to revoke access for %s: %s, no Sdc connected with host ip: %s", dataObject.getType(), dataObject.getId(), host.getPrivateIpAddress())); + logger.warn("Unable to revoke access for {}: [id: {}, uuid: {}], " + + "no Sdc connected with host [id: {}, uuid: {}, ip: {}]", + dataObject.getType(), dataObject.getId(), dataObject.getUuid(), + host.getId(), host.getUuid(), host.getPrivateIpAddress()); return; } - final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + final ScaleIOGatewayClient client = getScaleIOClient(dataStore); if (DataObjectType.VOLUME.equals(dataObject.getType())) { final VolumeVO volume = volumeDao.findById(dataObject.getId()); - logger.debug("Revoking access for PowerFlex volume: " + volume.getPath()); + logger.debug("Revoking access for PowerFlex volume: {} at path {}", volume, volume.getPath()); client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(volume.getPath()), sdcId); } else if (DataObjectType.TEMPLATE.equals(dataObject.getType())) { final VMTemplateStoragePoolVO templatePoolRef = vmTemplatePoolDao.findByPoolTemplate(dataStore.getId(), dataObject.getId(), null); - logger.debug("Revoking access for PowerFlex template volume: " + templatePoolRef.getInstallPath()); + logger.debug("Revoking access for PowerFlex template volume: {}", templatePoolRef.getInstallPath()); client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(templatePoolRef.getInstallPath()), sdcId); } else if (DataObjectType.SNAPSHOT.equals(dataObject.getType())) { SnapshotInfo snapshot = (SnapshotInfo) dataObject; - logger.debug("Revoking access for PowerFlex volume snapshot: " + snapshot.getPath()); + logger.debug("Revoking access for PowerFlex volume snapshot: {} at path {}", snapshot, snapshot.getPath()); client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(snapshot.getPath()), sdcId); } if 
(client.listVolumesMappedToSdc(sdcId).isEmpty()) { @@ -272,13 +282,15 @@ public void revokeVolumeAccess(String volumePath, Host host, DataStore dataStore try { logger.debug("Revoking access for PowerFlex volume: " + volumePath); - final String sdcId = getConnectedSdc(dataStore.getId(), host.getId()); + final String sdcId = getConnectedSdc(dataStore, host); if (StringUtils.isBlank(sdcId)) { - logger.warn(String.format("Unable to revoke access for volume: %s, no Sdc connected with host ip: %s", volumePath, host.getPrivateIpAddress())); + logger.warn("Unable to revoke access for volume: {}, " + + "no Sdc connected with host [id: {}, uuid: {}, ip: {}]", + volumePath, host.getId(), host.getUuid(), host.getPrivateIpAddress()); return; } - final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + final ScaleIOGatewayClient client = getScaleIOClient(dataStore); client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(volumePath), sdcId); if (client.listVolumesMappedToSdc(sdcId).isEmpty()) { sdcManager = ComponentContext.inject(sdcManager); @@ -294,19 +306,20 @@ private void revokeAccess(DataObject dataObject, EndPoint ep, DataStore dataStor revokeAccess(dataObject, host, dataStore); } - public String getConnectedSdc(long poolId, long hostId) { + public String getConnectedSdc(DataStore dataStore, Host host) { try { - StoragePoolHostVO poolHostVO = storagePoolHostDao.findByPoolHost(poolId, hostId); + StoragePoolHostVO poolHostVO = storagePoolHostDao.findByPoolHost(dataStore.getId(), host.getId()); if (poolHostVO == null) { return null; } - final ScaleIOGatewayClient client = getScaleIOClient(poolId); + final ScaleIOGatewayClient client = getScaleIOClient(dataStore); if (client.isSdcConnected(poolHostVO.getLocalPath())) { return poolHostVO.getLocalPath(); } } catch (Exception e) { - logger.warn("Couldn't check SDC connection for the host: " + hostId + " and storage pool: " + poolId + " due to " + e.getMessage(), e); + logger.warn(String.format("Couldn't check SDC 
connection for the host: %s and " + + "storage pool: %s due to %s", host, dataStore, e.getMessage()), e); } return null; @@ -424,7 +437,7 @@ public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback snapshots = snapshotDao.listByVolumeId(srcVolumeId); if (CollectionUtils.isNotEmpty(snapshots)) { for (SnapshotVO snapshot : snapshots) { - SnapshotDataStoreVO snapshotStore = snapshotDataStoreDao.findByStoreSnapshot(DataStoreRole.Primary, srcPoolId, snapshot.getId()); + SnapshotDataStoreVO snapshotStore = snapshotDataStoreDao.findByStoreSnapshot(DataStoreRole.Primary, srcStore.getId(), snapshot.getId()); if (snapshotStore == null) { continue; } @@ -979,7 +993,7 @@ public void deleteSourceVolumeAfterSuccessfulBlockCopy(DataObject srcData, Host String errMsg; try { String scaleIOVolumeId = ScaleIOUtil.getVolumePath(srcVolumePath); - final ScaleIOGatewayClient client = getScaleIOClient(srcStore.getId()); + final ScaleIOGatewayClient client = getScaleIOClient(srcStore); Boolean deleteResult = client.deleteVolume(scaleIOVolumeId); if (!deleteResult) { errMsg = "Failed to delete source PowerFlex volume with id: " + scaleIOVolumeId; @@ -1000,7 +1014,7 @@ public void revertBlockCopyVolumeOperations(DataObject srcData, DataObject destD String errMsg; try { String scaleIOVolumeId = ScaleIOUtil.getVolumePath(destVolumePath); - final ScaleIOGatewayClient client = getScaleIOClient(destStore.getId()); + final ScaleIOGatewayClient client = getScaleIOClient(destStore); Boolean deleteResult = client.deleteVolume(scaleIOVolumeId); if (!deleteResult) { errMsg = "Failed to delete PowerFlex volume with id: " + scaleIOVolumeId; @@ -1079,7 +1093,7 @@ private Answer migrateVolume(DataObject srcData, DataObject destData) { long srcPoolId = srcStore.getId(); long destPoolId = destStore.getId(); - final ScaleIOGatewayClient client = getScaleIOClient(srcPoolId); + final ScaleIOGatewayClient client = getScaleIOClient(srcStore); final String srcVolumePath = ((VolumeInfo) 
srcData).getPath(); final String srcVolumeId = ScaleIOUtil.getVolumePath(srcVolumePath); final StoragePoolVO destStoragePool = storagePoolDao.findById(destPoolId); @@ -1206,7 +1220,8 @@ private void resizeVolume(VolumeInfo volumeInfo) { try { String scaleIOVolumeId = ScaleIOUtil.getVolumePath(volumeInfo.getPath()); Long storagePoolId = volumeInfo.getPoolId(); - final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + final ScaleIOGatewayClient client = getScaleIOClient(storagePool); ResizeVolumePayload payload = (ResizeVolumePayload)volumeInfo.getpayload(); long newSizeInBytes = payload.newSize != null ? payload.newSize : volumeInfo.getSize(); @@ -1228,7 +1243,6 @@ private void resizeVolume(VolumeInfo volumeInfo) { } } - StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); boolean attachedRunning = false; long hostId = 0; @@ -1315,7 +1329,7 @@ private void resizeVolume(VolumeInfo volumeInfo) { storagePool.setUsedBytes(Math.min(usedBytes, capacityBytes)); storagePoolDao.update(storagePoolId, storagePool); } catch (Exception e) { - String errMsg = "Unable to resize PowerFlex volume: " + volumeInfo.getId() + " due to " + e.getMessage(); + String errMsg = "Unable to resize PowerFlex volume: " + volumeInfo + " due to " + e.getMessage(); logger.warn(errMsg); throw new CloudRuntimeException(errMsg, e); } @@ -1377,7 +1391,7 @@ public Map getCustomStorageStats(StoragePool pool) { Map customStats = new HashMap<>(); try { - final ScaleIOGatewayClient client = getScaleIOClient(pool.getId()); + final ScaleIOGatewayClient client = getScaleIOClient(pool); int connectedSdcsCount = client.getConnectedSdcsCount(); customStats.put(ScaleIOUtil.CONNECTED_SDC_COUNT_STAT, String.valueOf(connectedSdcsCount)); } catch (Exception e) { @@ -1393,7 +1407,7 @@ public Pair getStorageStats(StoragePool storagePool) { Preconditions.checkArgument(storagePool != null, "storagePool cannot be null"); try 
{ - final ScaleIOGatewayClient client = getScaleIOClient(storagePool.getId()); + final ScaleIOGatewayClient client = getScaleIOClient(storagePool); StoragePoolStatistics poolStatistics = client.getStoragePoolStatistics(storagePool.getPath()); if (poolStatistics != null && poolStatistics.getNetMaxCapacityInBytes() != null && poolStatistics.getNetUsedCapacityInBytes() != null) { Long capacityBytes = poolStatistics.getNetMaxCapacityInBytes(); @@ -1401,7 +1415,7 @@ public Pair getStorageStats(StoragePool storagePool) { return new Pair(capacityBytes, usedBytes); } } catch (Exception e) { - String errMsg = "Unable to get storage stats for the pool: " + storagePool.getId() + " due to " + e.getMessage(); + String errMsg = "Unable to get storage stats for the pool: " + storagePool + " due to " + e.getMessage(); logger.warn(errMsg); throw new CloudRuntimeException(errMsg, e); } @@ -1420,7 +1434,7 @@ public Pair getVolumeStats(StoragePool storagePool, String volumePat Preconditions.checkArgument(StringUtils.isNotEmpty(volumePath), "volumePath cannot be null"); try { - final ScaleIOGatewayClient client = getScaleIOClient(storagePool.getId()); + final ScaleIOGatewayClient client = getScaleIOClient(storagePool); VolumeStatistics volumeStatistics = client.getVolumeStatistics(ScaleIOUtil.getVolumePath(volumePath)); if (volumeStatistics != null) { Long provisionedSizeInBytes = volumeStatistics.getNetProvisionedAddressesInBytes(); @@ -1428,7 +1442,7 @@ public Pair getVolumeStats(StoragePool storagePool, String volumePat return new Pair(provisionedSizeInBytes, allocatedSizeInBytes); } } catch (Exception e) { - String errMsg = "Unable to get stats for the volume: " + volumePath + " in the pool: " + storagePool.getId() + " due to " + e.getMessage(); + String errMsg = "Unable to get stats for the volume: " + volumePath + " in the pool: " + storagePool + " due to " + e.getMessage(); logger.warn(errMsg); throw new CloudRuntimeException(errMsg, e); } @@ -1447,10 +1461,10 @@ public boolean 
canHostAccessStoragePool(Host host, StoragePool pool) { if (poolHostVO == null) { return false; } - final ScaleIOGatewayClient client = getScaleIOClient(pool.getId()); + final ScaleIOGatewayClient client = getScaleIOClient(pool); return client.isSdcConnected(poolHostVO.getLocalPath()); } catch (Exception e) { - logger.warn("Unable to check the host: " + host.getId() + " access to storage pool: " + pool.getId() + " due to " + e.getMessage(), e); + logger.warn("Unable to check the host: {} access to storage pool: {} due to {}", host, pool, e.getMessage(), e); return false; } } diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java index 7bbe0331c071..c72253f5c1cb 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java @@ -262,10 +262,10 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { primaryDataStoreInfo.getPodId(), primaryDataStoreInfo.getDataCenterId()); if (hostsInCluster.isEmpty()) { primaryDataStoreDao.expunge(primaryDataStoreInfo.getId()); - throw new CloudRuntimeException("No hosts are Up to associate a storage pool with in cluster: " + primaryDataStoreInfo.getClusterId()); + throw new CloudRuntimeException("No hosts are Up to associate a storage pool with in cluster: " + cluster); } - logger.debug("Attaching the pool to each of the hosts in the cluster: " + primaryDataStoreInfo.getClusterId()); + logger.debug("Attaching the pool to each of the hosts in the cluster: {}", cluster); List poolHosts = new ArrayList(); for (HostVO host : hostsInCluster) { try { @@ -273,12 +273,12 @@ public 
boolean attachCluster(DataStore dataStore, ClusterScope scope) { poolHosts.add(host); } } catch (Exception e) { - logger.warn("Unable to establish a connection between host: " + host + " and pool: " + dataStore + "on the cluster: " + primaryDataStoreInfo.getClusterId(), e); + logger.warn(String.format("Unable to establish a connection between host: %s and pool: %s on the cluster: %s", host, dataStore, cluster), e); } } if (poolHosts.isEmpty()) { - logger.warn("No host can access storage pool '" + primaryDataStoreInfo + "' on cluster '" + primaryDataStoreInfo.getClusterId() + "'."); + logger.warn("No host can access storage pool '{}' on cluster '{}'.", primaryDataStoreInfo, cluster); } dataStoreHelper.attachCluster(dataStore); @@ -360,17 +360,17 @@ public boolean deleteDataStore(DataStore dataStore) { DeleteStoragePoolCommand deleteStoragePoolCommand = new DeleteStoragePoolCommand(storagePool); final Answer answer = agentMgr.easySend(poolHostVO.getHostId(), deleteStoragePoolCommand); if (answer != null && answer.getResult()) { - logger.info("Successfully deleted storage pool: " + storagePool.getId() + " from host: " + poolHostVO.getHostId()); + logger.info("Successfully deleted storage pool: {} from host: {}", storagePool, poolHostVO.getHostId()); } else { if (answer != null) { - logger.error("Failed to delete storage pool: " + storagePool.getId() + " from host: " + poolHostVO.getHostId() + " , result: " + answer.getResult()); + logger.error("Failed to delete storage pool: {} from host: {} , result: {}", storagePool, poolHostVO.getHostId(), answer.getResult()); } else { - logger.error("Failed to delete storage pool: " + storagePool.getId() + " from host: " + poolHostVO.getHostId()); + logger.error("Failed to delete storage pool: {} from host: {}", storagePool, poolHostVO.getHostId()); } } } - ScaleIOGatewayClientConnectionPool.getInstance().removeClient(dataStore.getId()); + ScaleIOGatewayClientConnectionPool.getInstance().removeClient(dataStore); return 
dataStoreHelper.deletePrimaryDataStore(dataStore); } diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/manager/ScaleIOSDCManagerImpl.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/manager/ScaleIOSDCManagerImpl.java index 4d3a78f6875f..003fcd617697 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/manager/ScaleIOSDCManagerImpl.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/manager/ScaleIOSDCManagerImpl.java @@ -29,7 +29,9 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; @@ -70,6 +72,8 @@ public class ScaleIOSDCManagerImpl implements ScaleIOSDCManager, Configurable { @Inject StoragePoolHostDao storagePoolHostDao; @Inject + private PrimaryDataStoreDao storagePoolDao; + @Inject StoragePoolDetailsDao storagePoolDetailsDao; @Inject ConfigurationDao configDao; @@ -83,6 +87,7 @@ public ScaleIOSDCManagerImpl() { @Override public boolean areSDCConnectionsWithinLimit(Long storagePoolId) { + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); try { int connectedClientsLimit = StorageManager.STORAGE_POOL_CONNECTED_CLIENTS_LIMIT.valueIn(storagePoolId); if (connectedClientsLimit <= 0) { @@ -91,13 +96,19 @@ public boolean areSDCConnectionsWithinLimit(Long storagePoolId) { int connectedSdcsCount = getScaleIOClient(storagePoolId).getConnectedSdcsCount(); 
if (connectedSdcsCount < connectedClientsLimit) { - logger.debug(String.format("Current connected SDCs count: %d - SDC connections are within the limit (%d) on PowerFlex Storage with pool id: %d", connectedSdcsCount, connectedClientsLimit, storagePoolId)); + logger.debug("Current connected SDCs count: {} - SDC connections are " + + "within the limit ({}) on PowerFlex Storage with pool {}", + connectedSdcsCount, connectedClientsLimit, storagePool); return true; } - logger.debug(String.format("Current connected SDCs count: %d - SDC connections limit (%d) reached on PowerFlex Storage with pool id: %d", connectedSdcsCount, connectedClientsLimit, storagePoolId)); + logger.debug("Current connected SDCs count: {} - SDC connections limit ({}) " + + "reached on PowerFlex Storage with pool {}", + connectedSdcsCount, connectedClientsLimit, storagePool); return false; } catch (Exception e) { - String errMsg = "Unable to check SDC connections for the PowerFlex storage pool with id: " + storagePoolId + " due to " + e.getMessage(); + String errMsg = String.format( + "Unable to check SDC connections for the PowerFlex storage pool %s due to %s", + storagePool, e.getMessage()); logger.warn(errMsg, e); return false; } @@ -134,7 +145,8 @@ public String prepareSDC(Host host, DataStore dataStore) { long hostId = host.getId(); String sdcId = getConnectedSdc(host, dataStore); if (StringUtils.isNotBlank(sdcId)) { - logger.debug(String.format("SDC %s already connected for the pool: %d on host: %d, no need to prepare/start it", sdcId, poolId, hostId)); + logger.debug("SDC {} already connected for the pool: {} on host: {}, " + + "no need to prepare/start it", sdcId, dataStore, host); return sdcId; } @@ -174,7 +186,7 @@ public String prepareSDC(Host host, DataStore dataStore) { } int waitTimeInSecs = 15; // Wait for 15 secs (usual tests with SDC service start took 10-15 secs) - if (hostSdcConnected(sdcId, poolId, waitTimeInSecs)) { + if (hostSdcConnected(sdcId, dataStore, waitTimeInSecs)) { 
return sdcId; } return null; @@ -191,7 +203,7 @@ public String prepareSDC(Host host, DataStore dataStore) { } private String prepareSDCOnHost(Host host, DataStore dataStore, String systemId) { - logger.debug(String.format("Preparing SDC on the host %s (%s)", host.getId(), host.getName())); + logger.debug("Preparing SDC on the host {}", host.toString()); Map details = new HashMap<>(); details.put(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID, systemId); PrepareStorageClientCommand cmd = new PrepareStorageClientCommand(((PrimaryDataStore) dataStore).getPoolType(), dataStore.getUuid(), details); @@ -221,7 +233,7 @@ private String prepareSDCOnHost(Host host, DataStore dataStore, String systemId) Map poolDetails = prepareStorageClientAnswer.getDetailsMap(); if (MapUtils.isEmpty(poolDetails)) { - logger.warn(String.format("PowerFlex storage SDC details not found on the host: %s, try (re)install SDC and restart agent", host.getId())); + logger.warn("PowerFlex storage SDC details not found on the host: {}, try (re)install SDC and restart agent", host); return null; } @@ -230,11 +242,11 @@ private String prepareSDCOnHost(Host host, DataStore dataStore, String systemId) sdcId = poolDetails.get(ScaleIOGatewayClient.SDC_ID); } else if (poolDetails.containsKey(ScaleIOGatewayClient.SDC_GUID)) { String sdcGuid = poolDetails.get(ScaleIOGatewayClient.SDC_GUID); - sdcId = getHostSdcId(sdcGuid, dataStore.getId()); + sdcId = getHostSdcId(sdcGuid, dataStore); } if (StringUtils.isBlank(sdcId)) { - logger.warn(String.format("Couldn't retrieve PowerFlex storage SDC details from the host: %s, try (re)install SDC and restart agent", host.getId())); + logger.warn("Couldn't retrieve PowerFlex storage SDC details from the host: {}, try (re)install SDC and restart agent", host); return null; } @@ -250,7 +262,7 @@ public boolean stopSDC(Host host, DataStore dataStore) { String systemId = storagePoolDetailsDao.findDetail(dataStore.getId(), ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID).getValue(); if 
(systemId == null) { - throw new CloudRuntimeException("Unable to unprepare SDC, failed to get the system id for PowerFlex storage pool: " + dataStore.getName()); + throw new CloudRuntimeException("Unable to unprepare SDC, failed to get the system id for PowerFlex storage pool: " + dataStore); } GlobalLock lock = null; @@ -307,14 +319,14 @@ private boolean unprepareSDCOnHost(Host host, DataStore dataStore) { return true; } - private String getHostSdcId(String sdcGuid, long poolId) { + private String getHostSdcId(String sdcGuid, DataStore dataStore ) { try { - logger.debug(String.format("Try to get host SDC Id for pool: %s, with SDC guid %s", poolId, sdcGuid)); - ScaleIOGatewayClient client = getScaleIOClient(poolId); + logger.debug("Try to get host SDC Id for pool: {}, with SDC guid {}", dataStore, sdcGuid); + ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); return client.getSdcIdByGuid(sdcGuid); } catch (Exception e) { - logger.error(String.format("Failed to get host SDC Id for pool: %s", poolId), e); - throw new CloudRuntimeException(String.format("Failed to establish connection with PowerFlex Gateway to get host SDC Id for pool: %s", poolId)); + logger.error(String.format("Failed to get host SDC Id for pool: %s", dataStore), e); + throw new CloudRuntimeException(String.format("Failed to establish connection with PowerFlex Gateway to get host SDC Id for pool: %s", dataStore)); } } @@ -333,14 +345,18 @@ private String getConnectedSdc(Host host, DataStore dataStore) { return poolHostVO.getLocalPath(); } } catch (Exception e) { - logger.warn("Unable to get connected SDC for the host: " + hostId + " and storage pool: " + poolId + " due to " + e.getMessage(), e); + logger.warn( + String.format("Unable to get connected SDC for the host: %s and storage pool: %s due to %s", + host, dataStore, e.getMessage()), e); } return null; } - private boolean hostSdcConnected(String sdcId, long poolId, int waitTimeInSecs) { - logger.debug(String.format("Waiting 
(for %d secs) for the SDC %s of the pool id: %d to connect", waitTimeInSecs, sdcId, poolId)); + private boolean hostSdcConnected(String sdcId, DataStore dataStore, int waitTimeInSecs) { + long poolId = dataStore.getId(); + logger.debug(String.format("Waiting (for %d secs) for the SDC %s of the pool %s to connect", + waitTimeInSecs, sdcId, dataStore)); int timeBetweenTries = 1000; // Try more frequently (every sec) and return early if connected while (waitTimeInSecs > 0) { if (isHostSdcConnected(sdcId, poolId)) { @@ -366,7 +382,8 @@ private boolean isHostSdcConnected(String sdcId, long poolId) { } private ScaleIOGatewayClient getScaleIOClient(final Long storagePoolId) throws Exception { - return ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePoolId, storagePoolDetailsDao); + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + return ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePool, storagePoolDetailsDao); } @Override diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java index 737cc818be87..de0611a523a6 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java @@ -32,6 +32,7 @@ import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.Logger; @@ -89,7 +90,7 
@@ public boolean hostConnect(long hostId, long poolId) { storagePoolHost.setLocalPath(sdcId); _storagePoolHostDao.update(storagePoolHost.getId(), storagePoolHost); } - logger.info("Connection established between storage pool: " + storagePool + " and host: " + hostId); + logger.info("Connection established between storage pool: {} and host: {}", storagePool, host); } return true; } @@ -133,13 +134,16 @@ private String getSdcIdOfHost(HostVO host, StoragePool storagePool) { } private String getHostSdcId(String sdcGuid, long poolId) { + StoragePoolVO storagePool = _primaryDataStoreDao.findById(poolId); try { - logger.debug(String.format("Try to get host SDC Id for pool: %s, with SDC guid %s", poolId, sdcGuid)); - ScaleIOGatewayClient client = ScaleIOGatewayClientConnectionPool.getInstance().getClient(poolId, _storagePoolDetailsDao); + logger.debug(String.format("Try to get host SDC Id for pool: %s, with SDC guid %s", storagePool, sdcGuid)); + ScaleIOGatewayClient client = ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePool, _storagePoolDetailsDao); return client.getSdcIdByGuid(sdcGuid); } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) { - logger.error(String.format("Failed to get host SDC Id for pool: %s", poolId), e); - throw new CloudRuntimeException(String.format("Failed to establish connection with PowerFlex Gateway to get host SDC Id for pool: %s", poolId)); + logger.error(String.format("Failed to get host SDC Id for pool: %s", storagePool), e); + throw new CloudRuntimeException(String.format( + "Failed to establish connection with PowerFlex Gateway to get host SDC Id for pool: %s", + storagePool)); } } @@ -147,15 +151,15 @@ private ModifyStoragePoolAnswer sendModifyStoragePoolCommand(ModifyStoragePoolCo Answer answer = _agentMgr.easySend(hostId, cmd); if (answer == null) { - throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command (" + storagePool.getId() + ")"); + throw new 
CloudRuntimeException("Unable to get an answer to the modify storage pool command (" + storagePool.getName() + ")"); } if (!answer.getResult()) { - String msg = "Unable to attach PowerFlex storage pool " + storagePool.getId() + " to host " + hostId; + String msg = "Unable to attach PowerFlex storage pool " + storagePool + " to host " + hostId; _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg); - throw new CloudRuntimeException("Unable to establish a connection from agent to PowerFlex storage pool " + storagePool.getId() + " due to " + answer.getDetails() + + throw new CloudRuntimeException("Unable to establish a connection from agent to PowerFlex storage pool " + storagePool + " due to " + answer.getDetails() + " (" + storagePool.getId() + ")"); } diff --git a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriverTest.java b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriverTest.java index 4979fd1fa0aa..1b8fcee76bce 100644 --- a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriverTest.java +++ b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriverTest.java @@ -393,7 +393,7 @@ public void revertBlockCopyVolumeOperationsOnDeleteSuccess() throws Exception{ ScaleIOGatewayClient client = Mockito.mock(ScaleIOGatewayClient.class); doReturn(client).when(scaleIOPrimaryDataStoreDriver) - .getScaleIOClient(any()); + .getScaleIOClient(any(DataStore.class)); when(client.deleteVolume(any())).thenReturn(true); VolumeVO volume = new VolumeVO("root", 1L, 1L, 1L, 1L, 1L, "root", "root", Storage.ProvisioningType.THIN, 1, null, null, "root", Volume.Type.ROOT); @@ -430,7 +430,7 @@ public void revertBlockCopyVolumeOperationsOnDeleteFailure() 
throws Exception{ ScaleIOGatewayClient client = Mockito.mock(ScaleIOGatewayClient.class); doReturn(client).when(scaleIOPrimaryDataStoreDriver) - .getScaleIOClient(any()); + .getScaleIOClient(any(DataStore.class)); when(client.deleteVolume(any())).thenReturn(false); VolumeVO volume = new VolumeVO("root", 1L, 1L, 1L, 1L, 1L, "root", "root", Storage.ProvisioningType.THIN, 1, null, null, "root", Volume.Type.ROOT); @@ -461,7 +461,7 @@ public void deleteSourceVolumeSuccessScenarioAfterSuccessfulBlockCopy() throws E ScaleIOGatewayClient client = Mockito.mock(ScaleIOGatewayClient.class); doReturn(client).when(scaleIOPrimaryDataStoreDriver) - .getScaleIOClient(any()); + .getScaleIOClient(any(DataStore.class)); when(client.deleteVolume(any())).thenReturn(true); scaleIOPrimaryDataStoreDriver.deleteSourceVolumeAfterSuccessfulBlockCopy(srcData, host); @@ -483,11 +483,11 @@ public void deleteSourceVolumeFailureScenarioAfterSuccessfulBlockCopy() throws E when(srcData.getTO()).thenReturn(volumeTO); when(volumeTO.getPath()).thenReturn(srcVolumePath); String sdcId = "7332760565f6340f"; - doReturn(sdcId).when(scaleIOPrimaryDataStoreDriver).getConnectedSdc(1L, 1L); + doReturn(sdcId).when(scaleIOPrimaryDataStoreDriver).getConnectedSdc(srcStore, host); ScaleIOGatewayClient client = Mockito.mock(ScaleIOGatewayClient.class); doReturn(client).when(scaleIOPrimaryDataStoreDriver) - .getScaleIOClient(any()); + .getScaleIOClient(any(DataStore.class)); doReturn(true).when(client).unmapVolumeFromSdc(any(), any()); when(client.deleteVolume(any())).thenReturn(false); @@ -510,7 +510,7 @@ public void deleteSourceVolumeFailureScenarioWhenNoSDCisFound() { when(srcData.getTO()).thenReturn(volumeTO); when(volumeTO.getPath()).thenReturn(srcVolumePath); String sdcId = "7332760565f6340f"; - doReturn(null).when(scaleIOPrimaryDataStoreDriver).getConnectedSdc(1L, 1L); + doReturn(null).when(scaleIOPrimaryDataStoreDriver).getConnectedSdc(srcStore, host); 
scaleIOPrimaryDataStoreDriver.deleteSourceVolumeAfterSuccessfulBlockCopy(srcData, host); } diff --git a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java index 52dcad519421..84a0bfe9398a 100644 --- a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java +++ b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java @@ -131,7 +131,7 @@ public void testAttachZone() throws Exception { ScaleIOGatewayClientImpl client = mock(ScaleIOGatewayClientImpl.class); ScaleIOGatewayClientConnectionPool pool = mock(ScaleIOGatewayClientConnectionPool.class); scaleIOGatewayClientConnectionPoolMocked.when(() -> ScaleIOGatewayClientConnectionPool.getInstance()).thenReturn(pool); - lenient().when(pool.getClient(1L, storagePoolDetailsDao)).thenReturn(client); + lenient().when(pool.getClient(dataStore, storagePoolDetailsDao)).thenReturn(client); lenient().when(client.haveConnectedSdcs()).thenReturn(true); From bd5915313ff1b39ba389c870a9ba3884647c8c92 Mon Sep 17 00:00:00 2001 From: Vishesh Date: Mon, 21 Oct 2024 19:58:27 +0530 Subject: [PATCH 03/22] Improve logging to include more identifiable information for default volume storage plugin --- .../main/java/com/cloud/storage/SnapshotVO.java | 2 +- .../src/main/java/com/cloud/storage/VolumeVO.java | 4 ++-- .../storage/datastore/db/ImageStoreVO.java | 8 ++++++++ .../storage/image/store/ImageStoreImpl.java | 5 +++++ .../storage/snapshot/SnapshotObject.java | 8 ++++---- .../storage/datastore/PrimaryDataStoreImpl.java | 2 +- .../cloudstack/storage/volume/VolumeObject.java | 8 ++++++++ .../CloudStackPrimaryDataStoreDriverImpl.java | 15 ++++++++++----- 
.../CloudStackPrimaryDataStoreLifeCycleImpl.java | 13 +++++++------ 9 files changed, 46 insertions(+), 19 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/storage/SnapshotVO.java b/engine/schema/src/main/java/com/cloud/storage/SnapshotVO.java index 39d2cdd0b773..853dacac4701 100644 --- a/engine/schema/src/main/java/com/cloud/storage/SnapshotVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/SnapshotVO.java @@ -283,7 +283,7 @@ public Class getEntityType() { @Override public String toString() { - return String.format("Snapshot %s", new ToStringBuilder(this, ToStringStyle.JSON_STYLE).append("uuid", getUuid()).append("name", getName()) + return String.format("Snapshot %s", new ToStringBuilder(this, ToStringStyle.JSON_STYLE).append("id", getId()).append("uuid", getUuid()).append("name", getName()) .append("volumeId", getVolumeId()).toString()); } } diff --git a/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java b/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java index 45b6bd97c728..df7e7b7db2b1 100644 --- a/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java @@ -514,10 +514,10 @@ public void setUpdated(Date updated) { @Override public String toString() { - return String.format("StoragePool %s", + return String.format("Volume %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( this, "id", "name", - "uuid", "volumeType", "instanceId", "path")); + "uuid", "volumeType", "instanceId")); } @Override diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreVO.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreVO.java index 3ca9259c0997..d660960b713c 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreVO.java @@ -33,6 +33,7 @@ import 
com.cloud.storage.ScopeType; import com.cloud.utils.UriUtils; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "image_store") @@ -215,4 +216,11 @@ public Long getUsedBytes() { public void setUsedBytes(Long usedBytes) { this.usedBytes = usedBytes; } + + @Override + public String toString() { + return String.format("ImageStoreVO %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "name", "uuid")); + } } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java index d59f6d4c54dd..14db5ea57710 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java @@ -228,4 +228,9 @@ public Void createDataDiskTemplateAsync(TemplateInfo dataDiskTemplate, String pa return driver.createDataDiskTemplateAsync(dataDiskTemplate, path, diskId, bootable, fileSize, callback); } + @Override + public String toString() { + return imageDataStoreVO.toString(); + } + } diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java index ce21a723b243..5a3baceb7133 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java @@ -41,8 +41,6 @@ import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.to.SnapshotObjectTO; -import 
org.apache.commons.lang3.builder.ToStringBuilder; -import org.apache.commons.lang3.builder.ToStringStyle; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -471,7 +469,9 @@ public Class getEntityType() { @Override public String toString() { - return String.format("Snapshot %s", new ToStringBuilder(this, ToStringStyle.JSON_STYLE).append("uuid", getUuid()).append("name", getName()) - .append("volumeId", getVolumeId()).append("path", getPath()).toString()); + return "SnapshotObject{" + + "snapshotVO=" + getSnapshotVO() + + ", dataStore=" + getDataStore() + + '}'; + } } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java index cdf37c5fc9e4..7d8b83377853 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java @@ -469,7 +469,7 @@ public StoragePoolType getParentPoolType() { @Override public String toString() { - return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, + return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(pdsv, "id", "name", "uuid"); } } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java index 825a8cbd941c..c34b403ac0b0 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java @@ -944,4 +944,12 @@ public boolean isDeleteProtection() { public boolean isFollowRedirects() { return followRedirects; } + + @Override + public String toString() { + return "VolumeObject{" + + "volumeVO=" + 
volumeVO + + ", dataStore=" + dataStore + + '}'; + } } diff --git a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java index 02a28b6e947d..02dfe155021c 100644 --- a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java +++ b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java @@ -256,7 +256,9 @@ public void deleteAsync(DataStore dataStore, DataObject data, AsyncCompletionCal } } } catch (Exception ex) { - logger.debug("Unable to destroy volume" + data.getId(), ex); + logger.debug(String.format( + "Unable to destroy volume [id: %d, uuid: %s]", + data.getId(), data.getUuid()), ex); result.setResult(ex.toString()); } callback.complete(result); @@ -264,7 +266,10 @@ public void deleteAsync(DataStore dataStore, DataObject data, AsyncCompletionCal @Override public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCallback callback) { - logger.debug(String.format("Copying volume %s(%s) to %s(%s)", srcdata.getId(), srcdata.getType(), destData.getId(), destData.getType())); + logger.debug( + "Copying volume [id: {}, uuid: {}, type:{}] to [id: {} uuid: {}, type: {}]", + srcdata.getId(), srcdata.getUuid(), srcdata.getType(), + destData.getId(), destData.getUuid(), destData.getType()); boolean encryptionRequired = anyVolumeRequiresEncryption(srcdata, destData); DataStore store = destData.getDataStore(); if (store.getRole() == DataStoreRole.Primary) { @@ -381,7 +386,7 @@ public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback Date: Wed, 23 Oct 2024 00:35:11 +0530 Subject: [PATCH 04/22] Improve logging to include more identifiable information for agent managers --- 
.../src/main/java/com/cloud/agent/Agent.java | 6 +- .../com/cloud/agent/api/StartupAnswer.java | 8 ++- .../cloud/agent/manager/AgentManagerImpl.java | 59 ++++++++++++------- .../manager/ClusteredAgentManagerImpl.java | 39 ++++++------ .../manager/ClusteredDirectAgentAttache.java | 4 +- .../agent/manager/DirectAgentAttache.java | 15 +++-- .../agentlb/AgentLoadBalancerPlanner.java | 3 +- .../ClusterBasedAgentLoadBalancerPlanner.java | 21 ++++--- .../com/cloud/cluster/ClusterManagerImpl.java | 6 +- .../cloud/cluster/ManagementServerHostVO.java | 5 +- 10 files changed, 106 insertions(+), 60 deletions(-) diff --git a/agent/src/main/java/com/cloud/agent/Agent.java b/agent/src/main/java/com/cloud/agent/Agent.java index 15f010808aca..511d0bb0bdbb 100644 --- a/agent/src/main/java/com/cloud/agent/Agent.java +++ b/agent/src/main/java/com/cloud/agent/Agent.java @@ -594,7 +594,8 @@ public void processStartupAnswer(final Answer answer, final Response response, f return; } - logger.info("Process agent startup answer, agent id = {}", startup.getHostId()); + logger.info("Process agent startup answer, agent [id: {}, name: {}] connected to the server", + startup.getHostId(), startup.getHostName()); setId(startup.getHostId()); _pingInterval = (long)startup.getPingInterval() * 1000; // change to ms. 
@@ -604,7 +605,8 @@ public void processStartupAnswer(final Answer answer, final Response response, f _ugentTaskPool.setKeepAliveTime(2 * _pingInterval, TimeUnit.MILLISECONDS); - logger.info("Startup Response Received: agent id = {}", getId()); + logger.info("Startup Response Received: agent [id: {}, name: {}]", + getId(), startup.getHostName()); } protected void processRequest(final Request request, final Link link) { diff --git a/core/src/main/java/com/cloud/agent/api/StartupAnswer.java b/core/src/main/java/com/cloud/agent/api/StartupAnswer.java index 71652269b66f..ebd44b2a76ad 100644 --- a/core/src/main/java/com/cloud/agent/api/StartupAnswer.java +++ b/core/src/main/java/com/cloud/agent/api/StartupAnswer.java @@ -21,14 +21,16 @@ public class StartupAnswer extends Answer { long hostId; + String hostName; int pingInterval; protected StartupAnswer() { } - public StartupAnswer(StartupCommand cmd, long hostId, int pingInterval) { + public StartupAnswer(StartupCommand cmd, long hostId, String hostName, int pingInterval) { super(cmd); this.hostId = hostId; + this.hostName = hostName; this.pingInterval = pingInterval; } @@ -40,6 +42,10 @@ public long getHostId() { return hostId; } + public String getHostName() { + return hostName; + } + public int getPingInterval() { return pingInterval; } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java index 27b3ac2d7511..326b008eecde 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java @@ -302,7 +302,8 @@ private AgentControlAnswer handleControlCommand(final AgentAttache attache, fina } } - logger.warn("No handling of agent control command: {} sent from {}", cmd, attache.getId()); + logger.warn("No handling of agent control command: {} sent from [id: {} name: {}]", + cmd, 
attache.getId(), attache.getName()); return new AgentControlAnswer(cmd); } @@ -344,7 +345,7 @@ public Answer sendTo(final Long dcId, final HypervisorType type, final Command c answer = easySend(targetHostId, cmd); } catch (final Exception e) { String errorMsg = String.format("Error sending command %s to host %s, due to %s", cmd.getClass().getName(), - host.getUuid(), e.getLocalizedMessage()); + host, e.getLocalizedMessage()); logger.error(errorMsg); logger.debug(errorMsg, e); } @@ -464,11 +465,11 @@ protected Status investigate(final AgentAttache agent) { final Long hostId = agent.getId(); final HostVO host = _hostDao.findById(hostId); if (host != null && host.getType() != null && !host.getType().isVirtual()) { - logger.debug("Checking if agent ({}) is alive", hostId); + logger.debug("Checking if agent ({}) is alive", host); final Answer answer = easySend(hostId, new CheckHealthCommand()); if (answer != null && answer.getResult()) { final Status status = Status.Up; - logger.debug("Agent ({}) responded to checkHealthCommand, reporting that agent is {}", hostId, status); + logger.debug("Agent ({}) responded to checkHealthCommand, reporting that agent is {}", host, status); return status; } return _haMgr.investigate(hostId); @@ -493,7 +494,9 @@ protected AgentAttache getAttache(final Long hostId) throws AgentUnavailableExce public long send(final Long hostId, final Commands commands, final Listener listener) throws AgentUnavailableException { final AgentAttache agent = getAttache(hostId); if (agent.isClosed()) { - throw new AgentUnavailableException("Agent " + agent.getId() + " is closed", agent.getId()); + throw new AgentUnavailableException(String.format( + "Agent [id: %d, name: %s] is closed", + agent.getId(), agent.getName()), agent.getId()); } final Command[] cmds = checkForCommandsAndTag(commands); @@ -510,7 +513,7 @@ public void removeAgent(final AgentAttache attache, final Status nextState) { return; } final long hostId = attache.getId(); - 
logger.debug("Remove Agent : {}", hostId); + logger.debug("Remove Agent : [id: {}, name: {}]", hostId, attache.getName()); AgentAttache removed = null; boolean conflict = false; synchronized (_agents) { @@ -522,7 +525,8 @@ public void removeAgent(final AgentAttache attache, final Status nextState) { } } if (conflict) { - logger.debug("Agent for host {} is created when it is being disconnected", hostId); + logger.debug("Agent for host [id: {}, name: {}] is created when it is being disconnected", + hostId, attache.getName()); } if (removed != null) { removed.disconnect(nextState); @@ -565,11 +569,15 @@ protected AgentAttache notifyMonitorsOfConnection(final AgentAttache attache, fi } } else if (e instanceof HypervisorVersionChangedException) { handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true); - throw new CloudRuntimeException("Unable to connect " + attache.getId(), e); + throw new CloudRuntimeException(String.format( + "Unable to connect [id: %d, name: %s]", + attache.getId(), attache.getName()), e); } else { logger.error("Monitor {} says there is an error in the connect process for {} due to {}", monitor.second().getClass().getSimpleName(), hostId, e.getMessage(), e); handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true); - throw new CloudRuntimeException("Unable to connect " + attache.getId(), e); + throw new CloudRuntimeException(String.format( + "Unable to connect [id: %d, name: %s]", + attache.getId(), attache.getName()), e); } } } @@ -1004,21 +1012,26 @@ public void reconnect(final long hostId) throws AgentUnavailableException { } if (host.getRemoved() != null) { - throw new CloudRuntimeException("Host has already been removed: " + hostId); + throw new CloudRuntimeException(String.format( + "Host has already been removed: %s", host)); } if (host.getStatus() == Status.Disconnected) { - logger.debug("Host is already disconnected, no work to be done: {}", hostId); + logger.debug("Host is already 
disconnected, no work to be done: {}", host); return; } if (host.getStatus() != Status.Up && host.getStatus() != Status.Alert && host.getStatus() != Status.Rebalancing) { - throw new CloudRuntimeException("Unable to disconnect host because it is not in the correct state: host=" + hostId + "; Status=" + host.getStatus()); + throw new CloudRuntimeException(String.format( + "Unable to disconnect host because it is not in the correct state: host=%s; Status=%s", + host, host.getStatus())); } AgentAttache attache = findAttache(hostId); if (attache == null) { - throw new CloudRuntimeException("Unable to disconnect host because it is not connected to this server: " + hostId); + throw new CloudRuntimeException(String.format( + "Unable to disconnect host because it is not connected to this server: %s", + host)); } disconnectWithoutInvestigation(attache, Event.ShutdownRequested); } @@ -1118,7 +1131,8 @@ private AgentAttache sendReadyAndGetAttache(HostVO host, ReadyCommand ready, Lin joinLock.unlock(); } } else { - throw new ConnectionException(true, "Unable to acquire lock on host " + host.getUuid()); + throw new ConnectionException(true, + String.format("Unable to acquire lock on host %s", host)); } joinLock.releaseRef(); return attache; @@ -1240,7 +1254,7 @@ protected void connectAgent(final Link link, final Command[] cmds, final Request cmd = cmds[i]; if (cmd instanceof StartupRoutingCommand || cmd instanceof StartupProxyCommand || cmd instanceof StartupSecondaryStorageCommand || cmd instanceof StartupStorageCommand) { - answers[i] = new StartupAnswer((StartupCommand) cmds[i], 0, mgmtServiceConf.getPingInterval()); + answers[i] = new StartupAnswer((StartupCommand) cmds[i], 0, "", mgmtServiceConf.getPingInterval()); break; } } @@ -1349,16 +1363,16 @@ protected void processRequest(final Link link, final Request request) { if (cmd instanceof StartupRoutingCommand) { final StartupRoutingCommand startup = (StartupRoutingCommand) cmd; processStartupRoutingCommand(startup, 
hostId); - answer = new StartupAnswer(startup, attache.getId(), mgmtServiceConf.getPingInterval()); + answer = new StartupAnswer(startup, attache.getId(), attache.getName(), mgmtServiceConf.getPingInterval()); } else if (cmd instanceof StartupProxyCommand) { final StartupProxyCommand startup = (StartupProxyCommand) cmd; - answer = new StartupAnswer(startup, attache.getId(), mgmtServiceConf.getPingInterval()); + answer = new StartupAnswer(startup, attache.getId(), attache.getName(), mgmtServiceConf.getPingInterval()); } else if (cmd instanceof StartupSecondaryStorageCommand) { final StartupSecondaryStorageCommand startup = (StartupSecondaryStorageCommand) cmd; - answer = new StartupAnswer(startup, attache.getId(), mgmtServiceConf.getPingInterval()); + answer = new StartupAnswer(startup, attache.getId(), attache.getName(), mgmtServiceConf.getPingInterval()); } else if (cmd instanceof StartupStorageCommand) { final StartupStorageCommand startup = (StartupStorageCommand) cmd; - answer = new StartupAnswer(startup, attache.getId(), mgmtServiceConf.getPingInterval()); + answer = new StartupAnswer(startup, attache.getId(), attache.getName(), mgmtServiceConf.getPingInterval()); } else if (cmd instanceof ShutdownCommand) { final ShutdownCommand shutdown = (ShutdownCommand)cmd; final String reason = shutdown.getReason(); @@ -1518,8 +1532,9 @@ public boolean agentStatusTransitTo(final HostVO host, final Status.Event e, fin try { return _statusStateMachine.transitTo(host, e, host.getId(), _hostDao); } catch (final NoTransitionException e1) { - logger.debug("Cannot transit agent status with event {} for host {}, name={}, management server id is {}", e, host.getId(), host.getName(), msId); - throw new CloudRuntimeException("Cannot transit agent status with event " + e + " for host " + host.getId() + ", management server id is " + msId + "," + e1.getMessage()); + logger.debug("Cannot transit agent status with event {} for host {}, management server id is {}", e, host, msId); + 
throw new CloudRuntimeException(String.format( "Cannot transit agent status with event %s for host %s, management server id is %d, %s", e, host, msId, e1.getMessage())); } } finally { _agentStatusLock.unlock(); } } @@ -1600,7 +1615,7 @@ public boolean handleDirectConnectAgent(final Host host, final StartupCommand[] attache = createAttacheForDirectConnect(host, resource); final StartupAnswer[] answers = new StartupAnswer[cmds.length]; for (int i = 0; i < answers.length; i++) { - answers[i] = new StartupAnswer(cmds[i], attache.getId(), mgmtServiceConf.getPingInterval()); + answers[i] = new StartupAnswer(cmds[i], attache.getId(), attache.getName(), mgmtServiceConf.getPingInterval()); } attache.process(answers); diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java index a7fea0f25331..58560dcd2ca7 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java @@ -216,10 +216,11 @@ private void scanDirectAgentToLoad() { } } - logger.debug("Loading directly connected host {}({})", host.getId(), host.getName()); + logger.debug("Loading directly connected host {}", host); loadDirectlyConnectedHost(host, false); } catch (final Throwable e) { - logger.warn(" can not load directly connected host {}({}) due to ", host.getId(), host.getName(), e); + logger.warn(" can not load directly connected host {} due to ", + host, e); } } } @@ -243,9 +244,9 @@ public Task create(final Task.Type type, final Link link, final byte[] data) { return new ClusteredAgentHandler(type, link, data); } - protected AgentAttache createAttache(final long id) { - logger.debug("create forwarding ClusteredAgentAttache for {}", id); - final HostVO host = _hostDao.findById(id); + protected AgentAttache createAttache(final HostVO host) 
{ + logger.debug("create forwarding ClusteredAgentAttache for {}", host); + long id = host.getId(); final AgentAttache attache = new ClusteredAgentAttache(this, id, host.getName()); AgentAttache old = null; synchronized (_agents) { @@ -261,7 +262,7 @@ protected AgentAttache createAttache(final long id) { @Override protected AgentAttache createAttacheForConnect(final HostVO host, final Link link) { - logger.debug("create ClusteredAgentAttache for {}", host.getId()); + logger.debug("create ClusteredAgentAttache for {}", host); final AgentAttache attache = new ClusteredAgentAttache(this, host.getId(), host.getName(), link, host.isInMaintenanceStates()); link.attach(attache); AgentAttache old = null; @@ -329,7 +330,9 @@ public boolean executeUserRequest(final long hostId, final Event event) throws A final HostTransferMapVO transferVO = _hostTransferDao.findById(hostId); if (transferVO != null) { if (transferVO.getFutureOwner() == _nodeId && transferVO.getState() == HostTransferState.TransferStarted) { - logger.debug("Not processing {} event for the host id={} as the host is being connected to {}",Event.AgentDisconnected, hostId, _nodeId); + logger.debug( + "Not processing {} event for the host [id: {}, name: {}] as the host is being connected to {}", + Event.AgentDisconnected, hostId, attache.getName(), _nodeId); return true; } } @@ -545,8 +548,8 @@ protected AgentAttache getAttache(final Long hostId) throws AgentUnavailableExce AgentAttache agent = findAttache(hostId); if (agent == null || !agent.forForward()) { if (isHostOwnerSwitched(host)) { - logger.debug("Host {} has switched to another management server, need to update agent map with a forwarding agent attache", hostId); - agent = createAttache(hostId); + logger.debug("Host {} has switched to another management server, need to update agent map with a forwarding agent attache", host); + agent = createAttache(host); } } if (agent == null) { @@ -814,22 +817,24 @@ public void startRebalanceAgents() { List 
hostsToRebalance = new ArrayList(); for (final AgentLoadBalancerPlanner lbPlanner : _lbPlanners) { - hostsToRebalance = lbPlanner.getHostsToRebalance(node.getMsid(), avLoad); + hostsToRebalance = lbPlanner.getHostsToRebalance(node, avLoad); if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) { break; } - logger.debug("Agent load balancer planner " + lbPlanner.getName() + " found no hosts to be rebalanced from management server " + node.getMsid()); + logger.debug( + "Agent load balancer planner {} found no hosts to be rebalanced from management server {}", + lbPlanner.getName(), node); } if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) { logger.debug("Found {} hosts to rebalance from management server {}", hostsToRebalance.size(), node.getMsid()); for (final HostVO host : hostsToRebalance) { final long hostId = host.getId(); - logger.debug("Asking management server {} to give away host id={}", node.getMsid(), hostId); + logger.debug("Asking management server {} to give away host id={}", node, host); boolean result = true; if (_hostTransferDao.findById(hostId) != null) { - logger.warn("Somebody else is already rebalancing host id: {}", hostId); + logger.warn("Somebody else is already rebalancing host id: {}", host); continue; } @@ -838,11 +843,11 @@ public void startRebalanceAgents() { transfer = _hostTransferDao.startAgentTransfering(hostId, node.getMsid(), _nodeId); final Answer[] answer = sendRebalanceCommand(node.getMsid(), hostId, node.getMsid(), _nodeId, Event.RequestAgentRebalance); if (answer == null) { - logger.warn("Failed to get host id={} from management server {}", hostId, node.getMsid()); + logger.warn("Failed to get host {} from management server {}", host, node); result = false; } } catch (final Exception ex) { - logger.warn("Failed to get host id={} from management server {}", hostId, node.getMsid(), ex); + logger.warn("Failed to get host {} from management server {}", host, node, ex); result = false; } finally { if (transfer != 
null) { @@ -857,7 +862,7 @@ public void startRebalanceAgents() { } } } else { - logger.debug("Found no hosts to rebalance from the management server {}", node.getMsid()); + logger.debug("Found no hosts to rebalance from the management server {}", node); } } } @@ -1119,7 +1124,7 @@ protected boolean startRebalance(final long hostId) { final ClusteredDirectAgentAttache attache = (ClusteredDirectAgentAttache)_agents.get(hostId); if (attache != null && attache.getQueueSize() == 0 && attache.getNonRecurringListenersSize() == 0) { handleDisconnectWithoutInvestigation(attache, Event.StartAgentRebalance, true, true); - final ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)createAttache(hostId); + final ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)createAttache(host); if (forwardAttache == null) { logger.warn("Unable to create a forward attache for the host {} as a part of rebalance process", hostId); return false; diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredDirectAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredDirectAgentAttache.java index ac1076a9ff0d..3d18951fb726 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredDirectAgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredDirectAgentAttache.java @@ -37,9 +37,9 @@ public void routeToAgent(byte[] data) throws AgentUnavailableException { try { req = Request.parse(data); } catch (ClassNotFoundException e) { - throw new CloudRuntimeException("Unable to rout to an agent ", e); + throw new CloudRuntimeException("Unable to route to an agent ", e); } catch (UnsupportedVersionException e) { - throw new CloudRuntimeException("Unable to rout to an agent ", e); + throw new CloudRuntimeException("Unable to route to an agent ", e); } if (req instanceof Response) { diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java 
b/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java index 927da34104ff..6f648f5dda06 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java @@ -115,7 +115,9 @@ public void process(Answer[] answers) { if (answers != null && answers[0] instanceof StartupAnswer) { StartupAnswer startup = (StartupAnswer)answers[0]; int interval = startup.getPingInterval(); - logger.info("StartupAnswer received {} Interval = {}", startup.getHostId(), interval); + logger.info( + "StartupAnswer received [id: {} name: {} Interval: {}]", + startup.getHostId(), startup.getHostName(), interval); _futures.add(_agentMgr.getCronJobPool().scheduleAtFixedRate(new PingTask(), interval, interval, TimeUnit.SECONDS)); } } @@ -140,7 +142,8 @@ private synchronized void queueTask(Task task) { } private synchronized void scheduleFromQueue() { - logger.trace("Agent attache={}, task queue size={}, outstanding tasks={}", _id, tasks.size(), _outstandingTaskCount.get()); + logger.trace("Agent attache [id: {}, name: {}], task queue size={}, outstanding tasks={}", + _id, _name, tasks.size(), _outstandingTaskCount.get()); while (!tasks.isEmpty() && _outstandingTaskCount.get() < _agentMgr.getDirectAgentThreadCap()) { _outstandingTaskCount.incrementAndGet(); _agentMgr.getDirectAgentPool().execute(tasks.remove()); @@ -152,7 +155,9 @@ protected class PingTask extends ManagedContextRunnable { protected synchronized void runInContext() { try { if (_outstandingCronTaskCount.incrementAndGet() >= _agentMgr.getDirectAgentThreadCap()) { - logger.warn("PingTask execution for direct attache({}) has reached maximum outstanding limit({}), bailing out", _id, _agentMgr.getDirectAgentThreadCap()); + logger.warn( + "PingTask execution for direct attache [id: {}, name: {}] has reached maximum outstanding limit({}), bailing out", + _id, _name, 
_agentMgr.getDirectAgentThreadCap()); return; } @@ -219,7 +224,9 @@ protected void runInContext() { long seq = _req.getSequence(); try { if (_outstandingCronTaskCount.incrementAndGet() >= _agentMgr.getDirectAgentThreadCap()) { - logger.warn("CronTask execution for direct attache({}) has reached maximum outstanding limit({}), bailing out", _id, _agentMgr.getDirectAgentThreadCap()); + logger.warn( + "CronTask execution for direct attache [id: {}, name: {}] has reached maximum outstanding limit({}), bailing out", + _id, _name, _agentMgr.getDirectAgentThreadCap()); bailout(); return; } diff --git a/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/AgentLoadBalancerPlanner.java b/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/AgentLoadBalancerPlanner.java index 7d139e5be149..e73776d134d2 100644 --- a/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/AgentLoadBalancerPlanner.java +++ b/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/AgentLoadBalancerPlanner.java @@ -18,11 +18,12 @@ import java.util.List; +import com.cloud.cluster.ManagementServerHostVO; import com.cloud.host.HostVO; import com.cloud.utils.component.Adapter; public interface AgentLoadBalancerPlanner extends Adapter { - List getHostsToRebalance(long msId, int avLoad); + List getHostsToRebalance(ManagementServerHostVO ms, int avLoad); } diff --git a/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/ClusterBasedAgentLoadBalancerPlanner.java b/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/ClusterBasedAgentLoadBalancerPlanner.java index 641ae4414805..5b05b4df0423 100644 --- a/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/ClusterBasedAgentLoadBalancerPlanner.java +++ b/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/ClusterBasedAgentLoadBalancerPlanner.java @@ -26,6 +26,7 @@ import javax.inject.Inject; +import com.cloud.cluster.ManagementServerHostVO; import org.springframework.stereotype.Component; import 
com.cloud.host.Host; @@ -43,15 +44,17 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements HostDao _hostDao = null; @Override - public List getHostsToRebalance(long msId, int avLoad) { + public List getHostsToRebalance(ManagementServerHostVO ms, int avLoad) { + long msId = ms.getMsid(); QueryBuilder sc = QueryBuilder.create(HostVO.class); sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing); sc.and(sc.entity().getManagementServerId(), Op.EQ, msId); List allHosts = sc.list(); if (allHosts.size() <= avLoad) { - logger.debug("Agent load = " + allHosts.size() + " for management server " + msId + " doesn't exceed average system agent load = " + avLoad + - "; so it doesn't participate in agent rebalancing process"); + logger.debug("Agent load = {} for management server {} doesn't exceed average " + + "system agent load = {}; so it doesn't participate in agent rebalancing process", + allHosts.size(), ms, avLoad); return null; } @@ -62,8 +65,9 @@ public List getHostsToRebalance(long msId, int avLoad) { List directHosts = sc.list(); if (directHosts.isEmpty()) { - logger.debug("No direct agents in status " + Status.Up + " exist for the management server " + msId + - "; so it doesn't participate in agent rebalancing process"); + logger.debug("No direct agents in status {} exist for the management server " + + "{}; so it doesn't participate in agent rebalancing process", + Status.Up, ms); return null; } @@ -88,8 +92,9 @@ public List getHostsToRebalance(long msId, int avLoad) { int hostsLeft = directHosts.size(); List hostsToReturn = new ArrayList(); - logger.debug("Management server " + msId + " can give away " + hostsToGive + " as it currently owns " + allHosts.size() + - " and the average agent load in the system is " + avLoad + "; finalyzing list of hosts to give away..."); + logger.debug("Management server {} can give away {} as it currently owns {} and the " + + "average agent load in the system is {}; finalyzing list of hosts to give 
away...", + ms, hostsToGive, allHosts.size(), avLoad); for (Long cluster : hostToClusterMap.keySet()) { List hostsInCluster = hostToClusterMap.get(cluster); hostsLeft = hostsLeft - hostsInCluster.size(); @@ -113,7 +118,7 @@ public List getHostsToRebalance(long msId, int avLoad) { } } - logger.debug("Management server " + msId + " is ready to give away " + hostsToReturn.size() + " hosts"); + logger.debug("Management server {} is ready to give away {} hosts", ms, hostsToReturn.size()); return hostsToReturn; } diff --git a/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java b/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java index 0ec566a4194c..ca7fba2692fd 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java @@ -110,6 +110,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C // _msid is the unique persistent identifier that peer name is based upon // private Long _mshostId = null; + private ManagementServerHostVO _mshost = null; protected long _msId = ManagementServerNode.getManagementServerId(); protected long _runId = System.currentTimeMillis(); @@ -1072,9 +1073,10 @@ public ManagementServerHostVO doInTransaction(final TransactionStatus status) { } }); + _mshost = mshost; _mshostId = mshost.getId(); if (logger.isInfoEnabled()) { - logger.info("Management server (host id : " + _mshostId + ") is being started at " + _clusterNodeIP + ":" + _currentServiceAdapter.getServicePort()); + logger.info("Management server (host id : {}) is being started at {}:{}", _mshost, _clusterNodeIP, _currentServiceAdapter.getServicePort()); } _mshostPeerDao.clearPeerInfo(_mshostId); @@ -1094,7 +1096,7 @@ public ManagementServerHostVO doInTransaction(final TransactionStatus status) { @DB public boolean stop() { if (logger.isInfoEnabled()) { - logger.info("Stopping Cluster manager, msid : " + _msId); 
+ logger.info("Stopping Cluster manager, msid : {}, runId : {}, host : {}",_msId, _runId, _mshost); } if (_mshostId != null) { diff --git a/framework/cluster/src/main/java/com/cloud/cluster/ManagementServerHostVO.java b/framework/cluster/src/main/java/com/cloud/cluster/ManagementServerHostVO.java index 2918ccd22d7c..cbd501d27e49 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/ManagementServerHostVO.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/ManagementServerHostVO.java @@ -32,6 +32,7 @@ import org.apache.cloudstack.management.ManagementServerHost; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "mshost") @@ -199,7 +200,9 @@ public void setAlertCount(int count) { @Override public String toString() { - return new StringBuilder("ManagementServer[").append("-").append(id).append("-").append(msid).append("-").append(state).append("]").toString(); + return String.format("ManagementServer %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "msid", "name", "type")); } @Override From c4dcb2b515636544c8282c96b219dc8553d78b43 Mon Sep 17 00:00:00 2001 From: Vishesh Date: Wed, 23 Oct 2024 12:11:37 +0530 Subject: [PATCH 05/22] Fixup --- .../cloud/agent/manager/AgentManagerImpl.java | 94 +++++++++++-------- .../manager/ClusteredAgentManagerImpl.java | 30 +++--- .../entity/api/db/EngineHostPodVO.java | 8 ++ .../src/main/java/com/cloud/dc/HostPodVO.java | 7 ++ .../storage/image/store/TemplateObject.java | 8 ++ .../storage/volume/VolumeObject.java | 4 +- .../com/cloud/cluster/ClusterManagerImpl.java | 41 ++++---- .../cloudstack/kvm/ha/KVMHAProvider.java | 4 +- .../kvm/ha/KVMHostActivityChecker.java | 4 +- .../CloudStackPrimaryDataStoreDriverImpl.java | 4 +- ...oudStackPrimaryDataStoreLifeCycleImpl.java | 9 +- .../driver/ScaleIOPrimaryDataStoreDriver.java | 34 ++++--- .../manager/ScaleIOSDCManagerImpl.java | 
12 +-- .../provider/ScaleIOHostListener.java | 4 +- 14 files changed, 148 insertions(+), 115 deletions(-) diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java index 326b008eecde..4be53d9f19be 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java @@ -559,11 +559,13 @@ protected AgentAttache notifyMonitorsOfConnection(final AgentAttache attache, fi if (e instanceof ConnectionException) { final ConnectionException ce = (ConnectionException)e; if (ce.isSetupError()) { - logger.warn("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + e.getMessage()); + logger.warn("Monitor {} says there is an error in the connect process for {} due to {}", + monitor.second().getClass().getSimpleName(), host, e.getMessage()); handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true); throw ce; } else { - logger.info("Monitor " + monitor.second().getClass().getSimpleName() + " says not to continue the connect process for " + hostId + " due to " + e.getMessage()); + logger.info("Monitor {} says not to continue the connect process for {} due to {}", + monitor.second().getClass().getSimpleName(), host, e.getMessage()); handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true); return attache; } @@ -573,7 +575,8 @@ protected AgentAttache notifyMonitorsOfConnection(final AgentAttache attache, fi "Unable to connect [id: %d, name: %s]", attache.getId(), attache.getName()), e); } else { - logger.error("Monitor {} says there is an error in the connect process for {} due to {}", monitor.second().getClass().getSimpleName(), hostId, e.getMessage(), e); + logger.error("Monitor {} says there is an error in the connect 
process for {} due to {}", + monitor.second().getClass().getSimpleName(), host, e.getMessage(), e); handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true); throw new CloudRuntimeException(String.format( "Unable to connect [id: %d, name: %s]", @@ -598,7 +601,7 @@ protected AgentAttache notifyMonitorsOfConnection(final AgentAttache attache, fi Map detailsMap = readyAnswer.getDetailsMap(); if (detailsMap != null) { String uefiEnabled = detailsMap.get(Host.HOST_UEFI_ENABLE); - logger.debug("Got HOST_UEFI_ENABLE [{}] for hostId [{}]:", uefiEnabled, host.getUuid()); + logger.debug("Got HOST_UEFI_ENABLE [{}] for host [{}]:", uefiEnabled, host); if (uefiEnabled != null) { _hostDao.loadDetails(host); if (!uefiEnabled.equals(host.getDetails().get(Host.HOST_UEFI_ENABLE))) { @@ -715,14 +718,14 @@ protected boolean loadDirectlyConnectedHost(final HostVO host, final boolean for // load the respective discoverer final Discoverer discoverer = _resourceMgr.getMatchingDiscover(host.getHypervisorType()); if (discoverer == null) { - logger.info("Could not to find a Discoverer to load the resource: {} for hypervisor type: {}", host.getId(), host.getHypervisorType()); + logger.info("Could not to find a Discoverer to load the resource: {} for hypervisor type: {}", host, host.getHypervisorType()); resource = loadResourcesWithoutHypervisor(host); } else { resource = discoverer.reloadResource(host); } if (resource == null) { - logger.warn("Unable to load the resource: {}", host.getId()); + logger.warn("Unable to load the resource: {}", host); return false; } @@ -748,7 +751,7 @@ protected boolean loadDirectlyConnectedHost(final HostVO host, final boolean for } protected AgentAttache createAttacheForDirectConnect(final Host host, final ServerResource resource) throws ConnectionException { - logger.debug("create DirectAgentAttache for {}", host.getId()); + logger.debug("create DirectAgentAttache for {}", host); final DirectAgentAttache attache = new 
DirectAgentAttache(this, host.getId(), host.getName(), resource, host.isInMaintenanceStates()); AgentAttache old = null; @@ -774,7 +777,7 @@ public boolean stop() { for (final AgentAttache agent : _agents.values()) { final HostVO host = _hostDao.findById(agent.getId()); if (host == null) { - logger.debug("Cant not find host {}", agent.getId()); + logger.debug("Cannot find host [id: {}, name: {}]", agent.getId(), agent.getName()); } else { if (!agent.forForward()) { agentStatusTransitTo(host, Event.ManagementServerDown, _nodeId); @@ -792,17 +795,17 @@ protected Status getNextStatusOnDisconnection(Host host, final Status.Event even final Status currentStatus = host.getStatus(); Status nextStatus; if (currentStatus == Status.Down || currentStatus == Status.Alert || currentStatus == Status.Removed) { - logger.debug("Host {} is already {}", host.getUuid(), currentStatus); + logger.debug("Host {} is already {}", host, currentStatus); nextStatus = currentStatus; } else { try { nextStatus = currentStatus.getNextStatus(event); } catch (final NoTransitionException e) { - final String err = String.format("Cannot find next status for %s as current status is %s for agent %s", event, currentStatus, host.getUuid()); + final String err = String.format("Cannot find next status for %s as current status is %s for agent %s", event, currentStatus, host); logger.debug(err); throw new CloudRuntimeException(err); } - logger.debug("The next status of agent {} is {}, current status is {}", host.getUuid(), nextStatus, currentStatus); + logger.debug("The next status of agent {} is {}, current status is {}", host, nextStatus, currentStatus); } return nextStatus; } @@ -814,17 +817,19 @@ protected boolean handleDisconnectWithoutInvestigation(final AgentAttache attach GlobalLock joinLock = getHostJoinLock(hostId); if (joinLock.lock(60)) { try { - logger.info("Host {} is disconnecting with event {}", hostId, event); + logger.info("Host [id: {}, name: {}] is disconnecting with event {}", + hostId, 
attache.getName(), event); Status nextStatus = null; final HostVO host = _hostDao.findById(hostId); if (host == null) { - logger.warn("Can't find host with {}", hostId); + logger.warn("Can't find host with {} (name: {})", hostId, attache.getName()); nextStatus = Status.Removed; } else { nextStatus = getNextStatusOnDisconnection(host, event); caService.purgeHostCertificate(host); } - logger.debug("Deregistering link for {} with state {}", hostId, nextStatus); + logger.debug("Deregistering link for [id: {}, name: {}] with state {}", + hostId, attache.getName(), nextStatus); removeAgent(attache, nextStatus); @@ -859,28 +864,30 @@ protected boolean handleDisconnectWithInvestigation(final AgentAttache attache, if (nextStatus == Status.Alert) { /* OK, we are going to the bad status, let's see what happened */ - logger.info("Investigating why host {} has disconnected with event", hostId, event); + logger.info("Investigating why host {} has disconnected with event {}", host, event); Status determinedState = investigate(attache); // if state cannot be determined do nothing and bail out if (determinedState == null) { if ((System.currentTimeMillis() >> 10) - host.getLastPinged() > AlertWait.value()) { - logger.warn("Agent {} state cannot be determined for more than {}({}) seconds, will go to Alert state", hostId, AlertWait, AlertWait.value()); + logger.warn("Agent {} state cannot be determined for more than {}({}) seconds, will go to Alert state", + host, AlertWait, AlertWait.value()); determinedState = Status.Alert; } else { - logger.warn("Agent {} state cannot be determined, do nothing", hostId); + logger.warn("Agent {} state cannot be determined, do nothing", host); return false; } } final Status currentStatus = host.getStatus(); - logger.info("The agent from host {} state determined is {}", hostId, determinedState); + logger.info("The agent from host {} state determined is {}", host, determinedState); if (determinedState == Status.Down) { - final String message = "Host is 
down: " + host.getId() + "-" + host.getName() + ". Starting HA on the VMs"; + final String message = String.format("Host %s is down. Starting HA on the VMs", host); logger.error(message); if (host.getType() != Host.Type.SecondaryStorage && host.getType() != Host.Type.ConsoleProxy) { - _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "Host down, " + host.getId(), message); + _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), + host.getPodId(), String.format("Host down, %s", host), message); } event = Status.Event.HostDown; } else if (determinedState == Status.Up) { @@ -889,15 +896,14 @@ protected boolean handleDisconnectWithInvestigation(final AgentAttache attache, agentStatusTransitTo(host, Status.Event.Ping, _nodeId); return false; } else if (determinedState == Status.Disconnected) { - logger.warn("Agent is disconnected but the host is still up: {}-{}", host.getId(), host.getName() + - '-' + host.getResourceState()); + logger.warn("Agent is disconnected but the host is still up: {} state: {}", host, host.getResourceState()); if (currentStatus == Status.Disconnected || (currentStatus == Status.Up && host.getResourceState() == ResourceState.PrepareForMaintenance)) { if ((System.currentTimeMillis() >> 10) - host.getLastPinged() > AlertWait.value()) { - logger.warn("Host {} has been disconnected past the wait time it should be disconnected.", host.getId()); + logger.warn("Host {} has been disconnected past the wait time it should be disconnected.", host); event = Status.Event.WaitedTooLong; } else { - logger.debug("Host {} has been determined to be disconnected but it hasn't passed the wait time yet.", host.getId()); + logger.debug("Host {} has been determined to be disconnected but it hasn't passed the wait time yet.", host); return false; } } else if (currentStatus == Status.Up) { @@ -915,12 +921,14 @@ protected boolean handleDisconnectWithInvestigation(final AgentAttache attache, 
final DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId()); final HostPodVO podVO = _podDao.findById(host.getPodId()); final String podName = podVO != null ? podVO.getName() : "NO POD"; - final String hostDesc = "name: " + host.getName() + " (id:" + host.getId() + "), availability zone: " + dcVO.getName() + ", pod: " + podName; - _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "Host in ALERT state, " + hostDesc, - "In availability zone " + host.getDataCenterId() + ", host is in alert state: " + host.getId() + "-" + host.getName()); + final String hostDesc = String.format("%s, availability zone: %s, pod: %s", host, dcVO, podName); + _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, + host.getDataCenterId(), host.getPodId(), + String.format("Host in ALERT state, %s", hostDesc), + String.format("In availability zone %s, host is in alert state: %s", dcVO, host)); } } else { - logger.debug("The next status of agent {} is not Alert, no need to investigate what happened", host.getId()); + logger.debug("The next status of agent {} is not Alert, no need to investigate what happened", host); } } handleDisconnectWithoutInvestigation(attache, event, true, true); @@ -966,7 +974,7 @@ public Answer easySend(final Long hostId, final Command cmd) { } final Status status = h.getStatus(); if (!status.equals(Status.Up) && !status.equals(Status.Connecting)) { - logger.debug("Can not send command {} due to Host {} not being up", cmd, hostId); + logger.debug("Can not send command {} due to Host {} not being up", cmd, h); return null; } final Answer answer = send(hostId, cmd); @@ -1083,7 +1091,7 @@ public boolean isAgentAttached(final long hostId) { } protected AgentAttache createAttacheForConnect(final HostVO host, final Link link) throws ConnectionException { - logger.debug("create ConnectedAgentAttache for {}", host.getId()); + logger.debug("create ConnectedAgentAttache for {}", host); final AgentAttache attache = 
new ConnectedAgentAttache(this, host.getId(), host.getName(), link, host.isInMaintenanceStates()); link.attach(attache); @@ -1284,7 +1292,7 @@ private void processHostHealthCheckResult(Boolean hostHealthCheckResult, long ho } if (!BooleanUtils.toBoolean(EnableKVMAutoEnableDisable.valueIn(host.getClusterId()))) { logger.debug("{} is disabled for the cluster {}, cannot process the health check result " + - "received for the host {}", EnableKVMAutoEnableDisable.key(), host.getClusterId(), host.getName()); + "received for the host {}", EnableKVMAutoEnableDisable.key(), host.getClusterId(), host); return; } @@ -1294,10 +1302,10 @@ private void processHostHealthCheckResult(Boolean hostHealthCheckResult, long ho logger.info("Host health check {}, auto {} KVM host: {}", hostHealthCheckResult ? "succeeds" : "fails", hostHealthCheckResult ? "enabling" : "disabling", - host.getName()); + host); _resourceMgr.autoUpdateHostAllocationState(hostId, resourceEvent); } catch (NoTransitionException e) { - logger.error("Cannot Auto {} host: {}", resourceEvent, host.getName(), e); + logger.error("Cannot Auto {} host: {}", resourceEvent, host, e); } } @@ -1376,7 +1384,9 @@ protected void processRequest(final Link link, final Request request) { } else if (cmd instanceof ShutdownCommand) { final ShutdownCommand shutdown = (ShutdownCommand)cmd; final String reason = shutdown.getReason(); - logger.info("Host {} has informed us that it is shutting down with reason {} and detail {}", attache.getId(), reason, shutdown.getDetail()); + logger.info( + "Host [id: {}, name: {}] has informed us that it is shutting down with reason {} and detail {}", + attache.getId(), attache.getName(), reason, shutdown.getDetail()); if (reason.equals(ShutdownCommand.Update)) { // disconnectWithoutInvestigation(attache, Event.UpdateNeeded); throw new CloudRuntimeException("Agent update not implemented"); @@ -1406,7 +1416,7 @@ protected void processRequest(final Link link, final Request request) { // gateway (cannot 
ping the default route) final DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId()); final HostPodVO podVO = _podDao.findById(host.getPodId()); - final String hostDesc = "name: " + host.getName() + " (id:" + host.getId() + "), availability zone: " + dcVO.getName() + ", pod: " + podVO.getName(); + final String hostDesc = String.format("%s, availability zone: %s, pod: %s", host, dcVO, podVO); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_ROUTING, host.getDataCenterId(), host.getPodId(), "Host lost connection to gateway, " + hostDesc, "Host [" + hostDesc + "] lost connection to gateway (default route) and is possibly having network connection issues."); @@ -1424,7 +1434,7 @@ protected void processRequest(final Link link, final Request request) { } else if (cmd instanceof ReadyAnswer) { final HostVO host = _hostDao.findById(attache.getId()); if (host == null) { - logger.debug("Cant not find host {}", attache.getId()); + logger.debug("Cant not find host id: {}(name: {})", attache.getId(), attache.getName()); } answer = new Answer(cmd); } else { @@ -1456,7 +1466,8 @@ protected void processResponse(final Link link, final Response response) { if (attache == null) { logger.warn("Unable to process: {}", response); } else if (!attache.processAnswers(response.getSequence(), response)) { - logger.info("Host {} - Seq {}: Response is not processed: {}", attache.getId(), response.getSequence(), response); + logger.info("Host [id: {}, name: {}] - Seq {}: Response is not processed: {}", + attache.getId(), attache.getName(), response.getSequence(), response); } } @@ -1526,7 +1537,8 @@ public boolean tapLoadingAgents(final Long hostId, final TapAgentsAction action) public boolean agentStatusTransitTo(final HostVO host, final Status.Event e, final long msId) { try { _agentStatusLock.lock(); - logger.debug("[Resource state = {}, Agent event = , Host id = {}, name = {}]", host.getResourceState(), e.toString(), host.getId(), host.getName()); + logger.debug("[Resource state = 
{}, Agent event = {}, Host = {}]",
                host.getResourceState(), e.toString(), host); host.setManagementServerId(msId); try { @@ -1686,7 +1698,7 @@ protected void runInContext() { /* * Host is in non-operation state, so no investigation and direct put agent to Disconnected */ - logger.debug("Ping timeout but agent {} is in resource state of {}, so no investigation", agentId, resourceState); + logger.debug("Ping timeout but agent {} is in resource state of {}, so no investigation", h, resourceState); disconnectWithoutInvestigation(agentId, Event.ShutdownRequested); } else { final HostVO host = _hostDao.findById(agentId); @@ -1696,7 +1708,7 @@ protected void runInContext() { logger.warn("Disconnect agent for CPVM/SSVM due to physical connection close. host: {}", host.getId()); disconnectWithoutInvestigation(agentId, Event.ShutdownRequested); } else { - logger.debug("Ping timeout for agent {}, do investigation", agentId); + logger.debug("Ping timeout for agent {}, do investigation", h); disconnectWithInvestigation(agentId, Event.PingTimeout); } } @@ -1859,7 +1871,7 @@ public void processConnect(final Host host, final StartupCommand cmd, final bool Commands c = new Commands(cmds); send(host.getId(), c, this); } catch (AgentUnavailableException e) { - logger.debug("Failed to send host params on host: " + host.getId()); + logger.debug("Failed to send host params on host: {}", host); } } } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java index 58560dcd2ca7..a85c7652d594 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java @@ -341,7 +341,9 @@ public boolean executeUserRequest(final long hostId, final Event event) throws A // don't process disconnect if the disconnect came for the host via 
delayed cluster notification, // but the host has already reconnected to the current management server if (!attache.forForward()) { - logger.debug("Not processing {} event for the host id={} as the host is directly connected to the current management server {}", Event.AgentDisconnected, hostId, _nodeId); + logger.debug( + "Not processing {} event for the host [id: {}, name: {}] as the host is directly connected to the current management server {}", + Event.AgentDisconnected, hostId, attache.getName(), _nodeId); return true; } @@ -715,12 +717,12 @@ public void onManagementNodeJoined(final List no @Override public void onManagementNodeLeft(final List nodeList, final long selfNodeId) { for (final ManagementServerHost vo : nodeList) { - logger.info("Marking hosts as disconnected on Management server {}", vo.getMsid()); + logger.info("Marking hosts as disconnected on Management server {}", vo); final long lastPing = (System.currentTimeMillis() >> 10) - mgmtServiceConf.getTimeout(); _hostDao.markHostsAsDisconnected(vo.getMsid(), lastPing); outOfBandManagementDao.expireServerOwnership(vo.getMsid()); haConfigDao.expireServerOwnership(vo.getMsid()); - logger.info("Deleting entries from op_host_transfer table for Management server {}", vo.getMsid()); + logger.info("Deleting entries from op_host_transfer table for Management server {}", vo); cleanupTransferMap(vo.getMsid()); } } @@ -827,7 +829,7 @@ public void startRebalanceAgents() { } if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) { - logger.debug("Found {} hosts to rebalance from management server {}", hostsToRebalance.size(), node.getMsid()); + logger.debug("Found {} hosts to rebalance from management server {}", hostsToRebalance.size(), node); for (final HostVO host : hostsToRebalance) { final long hostId = host.getId(); logger.debug("Asking management server {} to give away host id={}", node, host); @@ -1032,7 +1034,7 @@ protected boolean rebalanceHost(final long hostId, final long currentOwnerId, fi } 
else if (futureOwnerId == _nodeId) { final HostVO host = _hostDao.findById(hostId); try { - logger.debug("Disconnecting host {}({}) as a part of rebalance process without notification", host.getId(), host.getName()); + logger.debug("Disconnecting host {} as a part of rebalance process without notification", host); final AgentAttache attache = findAttache(hostId); if (attache != null) { @@ -1040,21 +1042,21 @@ protected boolean rebalanceHost(final long hostId, final long currentOwnerId, fi } if (result) { - logger.debug("Loading directly connected host {}({}) to the management server {} as a part of rebalance process", host.getId(), host.getName(), _nodeId); + logger.debug("Loading directly connected host {} to the management server {} as a part of rebalance process", host, _nodeId); result = loadDirectlyConnectedHost(host, true); } else { - logger.warn("Failed to disconnect {}({}) as a part of rebalance process without notification" + host.getId(), host.getName()); + logger.warn("Failed to disconnect {} as a part of rebalance process without notification", host); } } catch (final Exception ex) { - logger.warn("Failed to load directly connected host {}({}) to the management server {} a part of rebalance process without notification", host.getId(), host.getName(), _nodeId, ex); + logger.warn("Failed to load directly connected host {} to the management server {} a part of rebalance process without notification", host, _nodeId, ex); result = false; } if (result) { - logger.debug("Successfully loaded directly connected host {}({}) to the management server {} a part of rebalance process without notification", host.getId(), host.getName(), _nodeId); + logger.debug("Successfully loaded directly connected host {} to the management server {} a part of rebalance process without notification", host, _nodeId); } else { - logger.warn("Failed to load directly connected host {}({}) to the management server {} a part of rebalance process without notification", host.getId(), 
host.getName(), _nodeId); + logger.warn("Failed to load directly connected host {} to the management server {} a part of rebalance process without notification", host, _nodeId); } } @@ -1126,18 +1128,18 @@ protected boolean startRebalance(final long hostId) { handleDisconnectWithoutInvestigation(attache, Event.StartAgentRebalance, true, true); final ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)createAttache(host); if (forwardAttache == null) { - logger.warn("Unable to create a forward attache for the host {} as a part of rebalance process", hostId); + logger.warn("Unable to create a forward attache for the host {} as a part of rebalance process", host); return false; } - logger.debug("Putting agent id={} to transfer mode", hostId); + logger.debug("Putting agent {} to transfer mode", host); forwardAttache.setTransferMode(true); _agents.put(hostId, forwardAttache); } else { if (attache == null) { - logger.warn("Attache for the agent {} no longer exists on management server, can't start host rebalancing", hostId, _nodeId); + logger.warn("Attache for the agent {} no longer exists on management server {}, can't start host rebalancing", host, _nodeId); } else { logger.warn("Attache for the agent {} has request queue size= {} and listener queue size {}, can't start host rebalancing", - hostId, attache.getQueueSize(), attache.getNonRecurringListenersSize()); + host, attache.getQueueSize(), attache.getNonRecurringListenersSize()); } return false; } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostPodVO.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostPodVO.java index 684b882fe8a7..f9fc4421ce5a 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostPodVO.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostPodVO.java @@ -38,6 +38,7 
@@ import com.cloud.utils.NumbersUtil; import com.cloud.utils.db.GenericDao; import com.cloud.utils.db.StateMachine; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "host_pod_ref") @@ -246,4 +247,11 @@ public Date getLastUpdated() { public State getState() { return state; } + + @Override + public String toString() { + return String.format("EngineHostPod %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "name", "uuid")); + } } diff --git a/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java b/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java index d9971815f5e6..4e50c95f24e3 100644 --- a/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java +++ b/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java @@ -31,6 +31,7 @@ import com.cloud.org.Grouping; import com.cloud.utils.NumbersUtil; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "host_pod_ref") @@ -197,4 +198,10 @@ public String getUuid() { public void setUuid(String uuid) { this.uuid = uuid; } + + @Override + public String toString() { + return String.format("HostPod %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "name", "uuid")); + } } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java index b7d83c702231..9eb087603911 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java @@ -596,4 +596,12 @@ public Date getUpdated() { public boolean isFollowRedirects() { return followRedirects; } + + @Override + public String toString() { + return "TemplateObject{" + + 
"templateVO=" + getImage() + + ", dataStore=" + getDataStore() + + '}'; + } } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java index c34b403ac0b0..d85ade0143d7 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java @@ -948,8 +948,8 @@ public boolean isFollowRedirects() { @Override public String toString() { return "VolumeObject{" + - "volumeVO=" + volumeVO + - ", dataStore=" + dataStore + + "volumeVO=" + getVolume() + + ", dataStore=" + getDataStore() + '}'; } } diff --git a/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java b/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java index ca7fba2692fd..c78bf105c939 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java @@ -381,11 +381,11 @@ public void broadcast(final long agentId, final String cmds) { } try { if (logger.isDebugEnabled()) { - logger.debug("Forwarding " + cmds + " to " + peer.getMsid()); + logger.debug("Forwarding {} to {}", cmds, peer); } executeAsync(peerName, agentId, cmds, true); } catch (final Exception e) { - logger.warn("Caught exception while talking to " + peer.getMsid()); + logger.warn("Caught exception while talking to {}", peer); } } } @@ -409,11 +409,11 @@ public void publishStatus(final String status) { final String peerName = Long.toString(peer.getMsid()); try { if (logger.isDebugEnabled()) { - logger.debug("Forwarding " + status + " to " + peer.getMsid()); + logger.debug("Forwarding {} to {}", status, peer); } sendStatus(peerName, status); } catch (final Exception e) { - String msg = String.format("Caught exception while talking to %d", 
peer.getMsid()); + String msg = String.format("Caught exception while talking to %s", peer); logger.warn(msg); logger.debug(msg, e); } @@ -504,7 +504,7 @@ public void notifyNodeJoined(final List nodeList) { logger.debug("Notify management server node join to listeners."); for (final ManagementServerHostVO mshost : nodeList) { - logger.debug("Joining node, IP: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid()); + logger.debug("Joining node, IP: {}, ms: {}", mshost.getServiceIP(), mshost); } } @@ -524,7 +524,7 @@ public void notifyNodeLeft(final List nodeList) { for (final ManagementServerHostVO mshost : nodeList) { if (logger.isDebugEnabled()) { - logger.debug("Leaving node, IP: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid()); + logger.debug("Leaving node, IP: {}, ms: {}", mshost.getServiceIP(), mshost); } cancelClusterRequestToPeer(String.valueOf(mshost.getMsid())); } @@ -812,8 +812,7 @@ private void initPeerScan() { if (logger.isInfoEnabled()) { logger.info("Found " + inactiveList.size() + " inactive management server node based on timestamp"); for (final ManagementServerHostVO host : inactiveList) { - logger.info("management server node msid: " + host.getMsid() + ", name: " + host.getName() + ", service ip: " + host.getServiceIP() + - ", version: " + host.getVersion()); + logger.info("management server node ms: {}, service ip: {}, version: {}", host, host.getServiceIP(), host.getVersion()); } } @@ -821,7 +820,7 @@ private void initPeerScan() { for (final ManagementServerHostVO host : inactiveList) { // Check if peer state is Up in the period if (!_mshostPeerDao.isPeerUpState(_mshostId, host.getId(), new Date(cutTime.getTime() - HeartbeatThreshold.value()))) { - logger.warn("Management node " + host.getId() + " is detected inactive by timestamp and did not send node status to this node"); + logger.warn("Management node {} is detected inactive by timestamp and did not send node status to this node", host); downHostList.add(host); } } @@ -866,7 
+865,8 @@ private void peerScan() throws ActiveFencingException { if (current == null) { if (entry.getKey().longValue() != _mshostId.longValue()) { if (logger.isDebugEnabled()) { - logger.debug("Detected management node left, id:" + entry.getKey() + ", nodeIP:" + entry.getValue().getServiceIP()); + logger.debug("Detected management node left {}, nodeIP:{}", + entry.getValue(), entry.getValue().getServiceIP()); } removedNodeList.add(entry.getValue()); } @@ -874,15 +874,16 @@ private void peerScan() throws ActiveFencingException { if (current.getRunid() == 0) { if (entry.getKey().longValue() != _mshostId.longValue()) { if (logger.isDebugEnabled()) { - logger.debug("Detected management node left because of invalidated session, id:" + entry.getKey() + ", nodeIP:" + - entry.getValue().getServiceIP()); + logger.debug("Detected management node left because of invalidated session {}, nodeIP:{}", + entry.getValue(), entry.getValue().getServiceIP()); } invalidatedNodeList.add(entry.getValue()); } } else { if (entry.getValue().getRunid() != current.getRunid()) { if (logger.isDebugEnabled()) { - logger.debug("Detected management node left and rejoined quickly, id:" + entry.getKey() + ", nodeIP:" + entry.getValue().getServiceIP()); + logger.debug("Detected management node left and rejoined quickly {}, nodeIP:{}", + entry.getValue(), entry.getValue().getServiceIP()); } entry.getValue().setRunid(current.getRunid()); @@ -955,7 +956,7 @@ private void processRemovedNodes(Date cutTime, List remo final ManagementServerHostVO mshost = it.next(); // Check if peer state is Up in the period if (!_mshostPeerDao.isPeerUpState(_mshostId, mshost.getId(), new Date(cutTime.getTime() - HeartbeatThreshold.value()))) { - logger.warn("Management node " + mshost.getId() + " is detected inactive by timestamp and did not send node status to this node"); + logger.warn("Management node {} is detected inactive by timestamp and did not send node status to this node", mshost); 
_activePeers.remove(mshost.getId()); try { JmxUtil.unregisterMBean("ClusterManager", "Node " + mshost.getId()); @@ -963,7 +964,7 @@ private void processRemovedNodes(Date cutTime, List remo logger.warn("Unable to deregiester cluster node from JMX monitoring due to exception " + e.toString()); } } else { - logger.info("Management node " + mshost.getId() + " is detected inactive by timestamp but sent node status to this node"); + logger.info("Management node {} is detected inactive by timestamp but sent node status to this node", mshost); it.remove(); } } @@ -980,7 +981,7 @@ private void processNewNodes(Date cutTime, List currentL _activePeers.put(mshost.getId(), mshost); if (logger.isDebugEnabled()) { - logger.debug("Detected management node joined, id:" + mshost.getId() + ", nodeIP:" + mshost.getServiceIP()); + logger.debug("Detected management node joined, {}, nodeIP:{}", mshost, mshost.getServiceIP()); } newNodeList.add(mshost); @@ -1059,13 +1060,13 @@ public ManagementServerHostVO doInTransaction(final TransactionStatus status) { mshost.setUuid(UUID.randomUUID().toString()); _mshostDao.persist(mshost); if (logger.isInfoEnabled()) { - logger.info("New instance of management server msid " + _msId + ", runId " + _runId + " is being started"); + logger.info("New instance of management server {}, runId {} is being started", mshost, _runId); } } else { _mshostDao.update(mshost.getId(), _runId, NetUtils.getCanonicalHostName(), version, _clusterNodeIP, _currentServiceAdapter.getServicePort(), DateUtil.currentGMTTime()); if (logger.isInfoEnabled()) { - logger.info("Management server " + _msId + ", runId " + _runId + " is being started"); + logger.info("Management server {}, runId {} is being started", mshost, _runId); } } @@ -1076,7 +1077,7 @@ public ManagementServerHostVO doInTransaction(final TransactionStatus status) { _mshost = mshost; _mshostId = mshost.getId(); if (logger.isInfoEnabled()) { - logger.info("Management server (host id : {}) is being started at {}:{}", 
_mshost, _clusterNodeIP, _currentServiceAdapter.getServicePort()); + logger.info("Management server (host : {}) is being started at {}:{}", _mshost, _clusterNodeIP, _currentServiceAdapter.getServicePort()); } _mshostPeerDao.clearPeerInfo(_mshostId); @@ -1096,7 +1097,7 @@ public ManagementServerHostVO doInTransaction(final TransactionStatus status) { @DB public boolean stop() { if (logger.isInfoEnabled()) { - logger.info("Stopping Cluster manager, msid : {}, runId : {}, host : {}",_msId, _runId, _mshost); + logger.info("Stopping Cluster manager, msid : {}, runId : {}, host : {}", _msId, _runId, _mshost); } if (_mshostId != null) { diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHAProvider.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHAProvider.java index 0e7a6f233b03..b937be5265b7 100644 --- a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHAProvider.java +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHAProvider.java @@ -78,7 +78,7 @@ public boolean recover(Host r) throws HARecoveryException { } } catch (Exception e){ logger.warn("OOBM service is not configured or enabled for this host {} error is {}", r, e.getMessage()); - throw new HARecoveryException(" OOBM service is not configured or enabled for this host " + r, e); + throw new HARecoveryException(String.format(" OOBM service is not configured or enabled for this host %s", r), e); } } @@ -95,7 +95,7 @@ public boolean fence(Host r) throws HAFenceException { } } catch (Exception e){ logger.warn("OOBM service is not configured or enabled for this host {} error is {}", r, e.getMessage()); - throw new HAFenceException("OBM service is not configured or enabled for this host " + r.getName() , e); + throw new HAFenceException(String.format("OBM service is not configured or enabled for this host %s", r.getName()), e); } } diff --git 
a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java index eec2b26ebb63..0692726852a0 100644 --- a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java @@ -67,7 +67,7 @@ public class KVMHostActivityChecker extends AdapterBase implements ActivityCheck @Override public boolean isActive(Host r, DateTime suspectTime) throws HACheckerException { try { - return isVMActivtyOnHost(r, suspectTime); + return isVMActivityOnHost(r, suspectTime); } catch (HACheckerException e) { //Re-throwing the exception to avoid poluting the 'HACheckerException' already thrown throw e; @@ -146,7 +146,7 @@ private boolean isAgentActive(Host agent) { return hostStatus == Status.Up; } - private boolean isVMActivtyOnHost(Host agent, DateTime suspectTime) throws HACheckerException { + private boolean isVMActivityOnHost(Host agent, DateTime suspectTime) throws HACheckerException { if (agent.getHypervisorType() != Hypervisor.HypervisorType.KVM && agent.getHypervisorType() != Hypervisor.HypervisorType.LXC) { throw new IllegalStateException(String.format("Calling KVM investigator for non KVM Host of type [%s].", agent.getHypervisorType())); } diff --git a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java index 02dfe155021c..bc2e4f5da6f4 100644 --- a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java +++ 
b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java @@ -386,7 +386,7 @@ public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback")); + logger.debug("Initiating copy from PowerFlex template volume on host {}", destHost != null ? destHost : ""); int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(); CopyCommand cmd = new CopyCommand(srcData.getTO(), destData.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); @@ -834,7 +833,7 @@ private Answer copyTemplateToVolume(DataObject srcData, DataObject destData, Hos protected Answer copyOfflineVolume(DataObject srcData, DataObject destData, Host destHost) { // Copy PowerFlex/ScaleIO volume - logger.debug(String.format("Initiating copy from PowerFlex template volume on host %s", destHost != null ? destHost.getId() : "")); + logger.debug("Initiating copy from PowerFlex template volume on host {}", destHost != null ? 
destHost : ""); String value = configDao.getValue(Config.CopyVolumeWait.key()); int copyVolumeWait = NumbersUtil.parseInt(value, Integer.parseInt(Config.CopyVolumeWait.getDefaultValue())); @@ -876,7 +875,7 @@ public Answer liveMigrateVolume(DataObject srcData, DataObject destData) { GetVolumeStatCommand statCmd = new GetVolumeStatCommand(srcVolumeInfo.getPath(), srcVolumeInfo.getStoragePoolType(), srcStore.getUuid()); GetVolumeStatAnswer statAnswer = (GetVolumeStatAnswer) ep.sendMessage(statCmd); if (!statAnswer.getResult() ) { - logger.warn(String.format("Unable to get volume %s stats", srcVolumeInfo.getId())); + logger.warn(String.format("Unable to get volume %s stats", srcVolumeInfo)); } else if (statAnswer.getVirtualSize() > 0) { srcVolumeUsableSize = statAnswer.getVirtualSize(); } @@ -897,15 +896,15 @@ public Answer liveMigrateVolume(DataObject srcData, DataObject destData) { updateVolumeAfterCopyVolume(srcData, destData); updateSnapshotsAfterCopyVolume(srcData, destData); deleteSourceVolumeAfterSuccessfulBlockCopy(srcData, host); - logger.debug(String.format("Successfully migrated migrate PowerFlex volume %d to storage pool %d", srcVolumeId, destPoolId)); + logger.debug("Successfully migrated PowerFlex volume {} to storage pool {}", srcData, destStore); answer = new Answer(null, true, null); } else { - String errorMsg = "Failed to migrate PowerFlex volume: " + srcVolumeId + " to storage pool " + destPoolId; + String errorMsg = String.format("Failed to migrate PowerFlex volume: %s to storage pool %s", srcData, destStore); logger.debug(errorMsg); answer = new Answer(null, false, errorMsg); } } catch (Exception e) { - logger.error("Failed to migrate PowerFlex volume: " + srcVolumeId + " due to: " + e.getMessage()); + logger.error("Failed to migrate PowerFlex volume: {} due to: {}", srcData, e.getMessage()); answer = new Answer(null, false, e.getMessage()); } @@ -1158,12 +1157,12 @@ private Answer migrateVolume(DataObject srcData, DataObject destData) { 
answer = new Answer(null, true, null); } else { - String errorMsg = "Failed to migrate PowerFlex volume: " + srcData.getId() + " to storage pool " + destPoolId; + String errorMsg = String.format("Failed to migrate PowerFlex volume: %s to storage pool %d", srcData, destPoolId); logger.debug(errorMsg); answer = new Answer(null, false, errorMsg); } } catch (Exception e) { - logger.error("Failed to migrate PowerFlex volume: " + srcData.getId() + " due to: " + e.getMessage()); + logger.error("Failed to migrate PowerFlex volume: {} due to: {}", srcData, e.getMessage()); answer = new Answer(null, false, e.getMessage()); } @@ -1395,8 +1394,7 @@ public Map getCustomStorageStats(StoragePool pool) { int connectedSdcsCount = client.getConnectedSdcsCount(); customStats.put(ScaleIOUtil.CONNECTED_SDC_COUNT_STAT, String.valueOf(connectedSdcsCount)); } catch (Exception e) { - String errMsg = "Unable to get custom storage stats for the pool: " + pool.getId() + " due to " + e.getMessage(); - logger.error(errMsg); + logger.error("Unable to get custom storage stats for the pool: {} due to {}", pool, e.getMessage()); } return customStats; @@ -1484,8 +1482,8 @@ private void alertHostSdcDisconnection(Host host) { return; } - logger.warn("SDC not connected on the host: " + host.getId()); - String msg = "SDC not connected on the host: " + host.getId() + ", reconnect the SDC to MDM"; + logger.warn("SDC not connected on the host: {}", host); + String msg = String.format("SDC not connected on the host: %s, reconnect the SDC to MDM", host); alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "SDC disconnected on host: " + host.getUuid(), msg); } diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/manager/ScaleIOSDCManagerImpl.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/manager/ScaleIOSDCManagerImpl.java index 003fcd617697..b9965d951099 100644 --- 
a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/manager/ScaleIOSDCManagerImpl.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/manager/ScaleIOSDCManagerImpl.java @@ -214,19 +214,19 @@ private String prepareSDCOnHost(Host host, DataStore dataStore, String systemId) try { prepareStorageClientAnswer = (PrepareStorageClientAnswer) agentManager.send(host.getId(), cmd); } catch (AgentUnavailableException | OperationTimedoutException e) { - String err = String.format("Failed to prepare SDC on the host %s, due to: %s", host.getName(), e.getMessage()); + String err = String.format("Failed to prepare SDC on the host %s, due to: %s", host, e.getMessage()); logger.error(err); throw new CloudRuntimeException(err); } if (prepareStorageClientAnswer == null) { - String err = String.format("Unable to prepare SDC on the host %s", host.getName()); + String err = String.format("Unable to prepare SDC on the host %s", host); logger.error(err); throw new CloudRuntimeException(err); } if (!prepareStorageClientAnswer.getResult()) { - String err = String.format("Unable to prepare SDC on the host %s, due to: %s", host.getName(), prepareStorageClientAnswer.getDetails()); + String err = String.format("Unable to prepare SDC on the host %s, due to: %s", host, prepareStorageClientAnswer.getDetails()); logger.error(err); throw new CloudRuntimeException(err); } @@ -306,14 +306,12 @@ private boolean unprepareSDCOnHost(Host host, DataStore dataStore) { try { unprepareStorageClientAnswer = agentManager.send(host.getId(), cmd); } catch (AgentUnavailableException | OperationTimedoutException e) { - String err = String.format("Failed to unprepare SDC on the host %s due to: %s", host.getName(), e.getMessage()); - logger.error(err); + logger.error("Failed to unprepare SDC on the host {} due to: {}", host, e.getMessage()); return false; } if (!unprepareStorageClientAnswer.getResult()) { - String err = String.format("Unable 
to unprepare SDC on the the host %s due to: %s", host.getName(), unprepareStorageClientAnswer.getDetails()); - logger.error(err); + logger.error("Unable to unprepare SDC on the host {} due to: {}", host, unprepareStorageClientAnswer.getDetails()); return false; } return true; diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java index de0611a523a6..29e52338a095 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java @@ -109,7 +109,7 @@ private String getSdcIdOfHost(HostVO host, StoragePool storagePool) { ModifyStoragePoolAnswer answer = sendModifyStoragePoolCommand(cmd, storagePool, hostId); Map poolDetails = answer.getPoolInfo().getDetails(); if (MapUtils.isEmpty(poolDetails)) { - String msg = "PowerFlex storage SDC details not found on the host: " + hostId + ", (re)install SDC and restart agent"; + String msg = String.format("PowerFlex storage SDC details not found on the host: %s, (re)install SDC and restart agent", host); logger.warn(msg); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "SDC not found on host: " + host.getUuid(), msg); return null; @@ -124,7 +124,7 @@ private String getSdcIdOfHost(HostVO host, StoragePool storagePool) { } if (StringUtils.isBlank(sdcId)) { - String msg = "Couldn't retrieve PowerFlex storage SDC details from the host: " + hostId + ", (re)install SDC and restart agent"; + String msg = String.format("Couldn't retrieve PowerFlex storage SDC details from the host: %s, (re)install SDC and restart agent", host); logger.warn(msg); 
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "SDC details not found on host: " + host.getUuid(), msg); return null; From 0bbc2cb85927268746e9eb8216b3c8782a62d720 Mon Sep 17 00:00:00 2001 From: Vishesh Date: Wed, 23 Oct 2024 12:12:58 +0530 Subject: [PATCH 06/22] Improve logging to include more identifiable information for Listeners --- .../cloud/vm/VirtualMachineManagerImpl.java | 76 +++---- .../entity/api/db/EngineClusterVO.java | 8 + .../entity/api/db/EngineDataCenterVO.java | 8 + .../entity/api/db/EngineHostVO.java | 5 +- .../orchestration/NetworkOrchestrator.java | 199 +++++++++--------- .../src/main/java/com/cloud/dc/HostPodVO.java | 5 +- .../network/dao/Site2SiteVpnConnectionVO.java | 8 + .../cloud/network/rules/FirewallRuleVO.java | 5 +- .../java/com/cloud/network/vpc/VpcVO.java | 6 +- .../java/com/cloud/storage/VMTemplateVO.java | 4 +- .../src/main/java/com/cloud/vm/NicVO.java | 5 +- .../storage/datastore/db/ImageStoreVO.java | 2 +- .../provider/DefaultHostListener.java | 34 ++- .../provider/DateraHostListener.java | 18 +- .../ScaleIOPrimaryDataStoreDriverTest.java | 4 - .../cloud/capacity/CapacityManagerImpl.java | 3 +- .../deploy/DeploymentPlanningManagerImpl.java | 183 ++++++++-------- .../network/NetworkUsageManagerImpl.java | 20 +- .../VirtualNetworkApplianceManagerImpl.java | 43 ++-- .../security/SecurityGroupListener.java | 2 +- .../storage/ImageStoreUploadMonitorImpl.java | 34 +-- .../storage/download/DownloadListener.java | 6 +- .../storage/listener/StoragePoolMonitor.java | 10 +- 23 files changed, 362 insertions(+), 326 deletions(-) diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 7c107ed6f547..58e9d26339fb 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ 
b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -1320,8 +1320,8 @@ public void orchestrateStart(final String vmUuid, final Map pendingWorkJobs = _workJobDao.listPendingWorkJobs(VirtualMachine.Type.Instance, vm.getId()); if (CollectionUtils.isNotEmpty(pendingWorkJobs) || _haMgr.hasPendingHaWork(vm.getId())) { - String msg = "There are pending jobs or HA tasks working on the VM with id: " + vm.getId() + ", can't unmanage the VM."; + String msg = String.format("There are pending jobs or HA tasks working on the VM: %s, can't unmanage the VM.", vm); logger.info(msg); throw new ConcurrentOperationException(msg); } @@ -2124,8 +2124,8 @@ private void advanceStop(final VMInstanceVO vm, final boolean cleanUpEvenIfUnabl } else { HostVO host = _hostDao.findById(hostId); if (!cleanUpEvenIfUnableToStop && vm.getState() == State.Running && host.getResourceState() == ResourceState.PrepareForMaintenance) { - logger.debug("Host is in PrepareForMaintenance state - Stop VM operation on the VM id: {} is not allowed", vm.getId()); - throw new CloudRuntimeException("Stop VM operation on the VM id: " + vm.getId() + " is not allowed as host is preparing for maintenance mode"); + logger.debug("Host is in PrepareForMaintenance state - Stop VM operation on the VM: {} is not allowed", vm); + throw new CloudRuntimeException(String.format("Stop VM operation on the VM %s is not allowed as host is preparing for maintenance mode", vm)); } } @@ -2509,7 +2509,7 @@ private void markVolumesInPool(VMInstanceVO vm, Answer[] hypervisorMigrationResu List volumes = _volsDao.findUsableVolumesForInstance(vm.getId()); logger.debug("Found {} volumes for VM {}(uuid:{}, id:{})", results.size(), vm.getInstanceName(), vm.getUuid(), vm.getId()); for (VolumeObjectTO result : results ) { - logger.debug("Updating volume ({}) with path '{}' on pool '{}'", result.getId(), result.getPath(), result.getDataStoreUuid()); + logger.debug("Updating volume ({}) with path '{}' on pool '{}'", 
result.getUuid(), result.getPath(), result.getDataStoreUuid()); VolumeVO volume = _volsDao.findById(result.getId()); StoragePool pool = _storagePoolDao.findPoolByUUID(result.getDataStoreUuid()); if (volume == null || pool == null) { @@ -2660,14 +2660,15 @@ private void afterStorageMigrationVmwareVMCleanup(StoragePool destPool, VMInstan private void removeStaleVmFromSource(VMInstanceVO vm, HostVO srcHost) { logger.debug("Since VM's storage was successfully migrated across VMware Datacenters, unregistering VM: {} from source host: {}", - vm.getInstanceName(), srcHost.getId()); + vm, srcHost); final UnregisterVMCommand uvc = new UnregisterVMCommand(vm.getInstanceName()); uvc.setCleanupVmFiles(true); try { _agentMgr.send(srcHost.getId(), uvc); } catch (AgentUnavailableException | OperationTimedoutException e) { - throw new CloudRuntimeException("Failed to unregister VM: " + vm.getInstanceName() + " from source host: " + srcHost.getId() + - " after successfully migrating VM's storage across VMware Datacenters", e); + throw new CloudRuntimeException(String.format( + "Failed to unregister VM: %s from source host: %s after successfully migrating VM's storage across VMware Datacenters", + vm, srcHost), e); } } @@ -2722,10 +2723,10 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy for (final VolumeVO volume : volumes) { if (!_storagePoolDao.findById(volume.getPoolId()).getScope().equals(ScopeType.ZONE)) { logger.info("Source and destination host are not in same cluster and all volumes are not on zone wide primary store, unable to migrate to host: {}", - dest.getHost().getId()); - throw new CloudRuntimeException( - "Source and destination host are not in same cluster and all volumes are not on zone wide primary store, unable to migrate to host: " - + dest.getHost().getId()); + dest.getHost()); + throw new CloudRuntimeException(String.format( + "Source and destination host are not in same cluster and all volumes are not on zone wide primary 
store, unable to migrate to host: %s", + dest.getHost())); } } } @@ -2858,7 +2859,7 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy throw new CloudRuntimeException("Unable to complete migration for " + vm); } } catch (final OperationTimedoutException e) { - logger.warn("Error while checking the vm {} on host {}", vm, dstHostId, e); + logger.warn("Error while checking the vm {} on host {}", vm, dest.getHost(), e); } migrated = true; } finally { @@ -3834,7 +3835,7 @@ public void processConnect(final Host agent, final StartupCommand cmd, final boo return; } - logger.debug("Received startup command from hypervisor host. host id: {}", agent.getId()); + logger.debug("Received startup command from hypervisor host. host: {}", agent); _syncMgr.resetHostSyncState(agent.getId()); @@ -4224,10 +4225,10 @@ private boolean orchestrateRemoveVmFromNetwork(final VirtualMachine vm, final Ne logger.debug("Not need to remove the vm {} from network {} as the vm doesn't have nic in this network.", vm, network); return true; } - throw new ConcurrentOperationException("Unable to lock nic " + nic.getId()); + throw new ConcurrentOperationException(String.format("Unable to lock nic %s", nic)); } - logger.debug("Lock is acquired for nic id {} as a part of remove vm {} from network {}", lock.getId(), vm, network); + logger.debug("Lock is acquired for nic {} as a part of remove vm {} from network {}", lock, vm, network); try { final NicProfile nicProfile = @@ -4256,7 +4257,7 @@ private boolean orchestrateRemoveVmFromNetwork(final VirtualMachine vm, final Ne return true; } finally { _nicsDao.releaseFromLockTable(lock.getId()); - logger.debug("Lock is released for nic id {} as a part of remove vm {} from network {}", lock.getId(), vm, network); + logger.debug("Lock is released for nic {} as a part of remove vm {} from network {}", lock, vm, network); } } @@ -4348,9 +4349,8 @@ private void orchestrateMigrateForScale(final String vmUuid, final long srcHostI 
vm.getServiceOfferingId(); final long dstHostId = dest.getHost().getId(); final Host fromHost = _hostDao.findById(srcHostId); - Host srcHost = _hostDao.findById(srcHostId); if (fromHost == null) { - String logMessageUnableToFindHost = String.format("Unable to find host to migrate from %s.", srcHost); + String logMessageUnableToFindHost = String.format("Unable to find host to migrate from %s.", srcHostId); logger.info(logMessageUnableToFindHost); throw new CloudRuntimeException(logMessageUnableToFindHost); } @@ -4359,7 +4359,7 @@ private void orchestrateMigrateForScale(final String vmUuid, final long srcHostI long destHostClusterId = dest.getCluster().getId(); long fromHostClusterId = fromHost.getClusterId(); if (fromHostClusterId != destHostClusterId) { - String logMessageHostsOnDifferentCluster = String.format("Source and destination host are not in same cluster, unable to migrate to %s", srcHost); + String logMessageHostsOnDifferentCluster = String.format("Source and destination host are not in same cluster, unable to migrate to %s", fromHost); logger.info(logMessageHostsOnDifferentCluster); throw new CloudRuntimeException(logMessageHostsOnDifferentCluster); } @@ -4406,7 +4406,7 @@ private void orchestrateMigrateForScale(final String vmUuid, final long srcHostI if (pfma == null || !pfma.getResult()) { final String details = pfma != null ? 
pfma.getDetails() : "null answer returned"; pfma = null; - throw new AgentUnavailableException(String.format("Unable to prepare for migration to destination host [%s] due to [%s].", dstHostId, details), dstHostId); + throw new AgentUnavailableException(String.format("Unable to prepare for migration to destination host [%s] due to [%s].", dest.getHost(), details), dstHostId); } } catch (final OperationTimedoutException e1) { throw new AgentUnavailableException("Operation timed out", dstHostId); @@ -4466,7 +4466,7 @@ private void orchestrateMigrateForScale(final String vmUuid, final long srcHostI try { _agentMgr.send(srcHostId, new Commands(cleanup(vm.getInstanceName())), null); } catch (final AgentUnavailableException e) { - logger.error("Unable to cleanup source host [{}] due to [{}].", srcHostId, e.getMessage(), e); + logger.error("Unable to cleanup source host [{}] due to [{}].", fromHost, e.getMessage(), e); } cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true); throw new CloudRuntimeException("Unable to complete migration for " + vm); @@ -4871,22 +4871,22 @@ private void handlePowerOnReportWithNoPendingJobsOnVM(final VMInstanceVO vm) { case Destroyed: case Expunging: - logger.info("Receive power on report when VM is in destroyed or expunging state. vm: {}, state: {}.", vm.getId(), vm.getState()); + logger.info("Receive power on report when VM is in destroyed or expunging state. 
vm: {}, state: {}.", vm, vm.getState()); break; case Migrating: - logger.info("VM {} is at {} and we received a power-on report while there is no pending jobs on it.", vm.getInstanceName(), vm.getState()); + logger.info("VM {} is at {} and we received a power-on report while there is no pending jobs on it.", vm, vm.getState()); try { stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId()); } catch (final NoTransitionException e) { logger.warn("Unexpected VM state transition exception, race-condition?", e); } - logger.info("VM {} is sync-ed to at Running state according to power-on report from hypervisor.", vm.getInstanceName()); + logger.info("VM {} is sync-ed to at Running state according to power-on report from hypervisor.", vm); break; case Error: default: - logger.info("Receive power on report when VM is in error or unexpected state. vm: {}, state: {}.", vm.getId(), vm.getState()); + logger.info("Receive power on report when VM is in error or unexpected state. 
vm: {}, state: {}.", vm, vm.getState()); break; } } @@ -4901,16 +4901,16 @@ private void handlePowerOffReportWithNoPendingJobsOnVM(final VMInstanceVO vm) { EventTypes.EVENT_VM_STOP, "Out of band VM power off", vm.getId(), getApiCommandResourceTypeForVm(vm).toString()); case Migrating: logger.info("VM {} is at {} and we received a {} report while there is no pending jobs on it" - , vm.getInstanceName(), vm.getState(), vm.getPowerState()); + , vm, vm.getState(), vm.getPowerState()); if((HighAvailabilityManager.ForceHA.value() || vm.isHaEnabled()) && vm.getState() == State.Running && HaVmRestartHostUp.value() && vm.getHypervisorType() != HypervisorType.VMware && vm.getHypervisorType() != HypervisorType.Hyperv) { - logger.info("Detected out-of-band stop of a HA enabled VM {}, will schedule restart.", vm.getInstanceName()); + logger.info("Detected out-of-band stop of a HA enabled VM {}, will schedule restart.", vm); if (!_haMgr.hasPendingHaWork(vm.getId())) { _haMgr.scheduleRestart(vm, true); } else { - logger.info("VM {} already has a pending HA task working on it.", vm.getInstanceName()); + logger.info("VM {} already has a pending HA task working on it.", vm); } return; } @@ -4937,10 +4937,10 @@ private void handlePowerOffReportWithNoPendingJobsOnVM(final VMInstanceVO vm) { } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_SYNC, vm.getDataCenterId(), vm.getPodIdToDeployIn(), - VM_SYNC_ALERT_SUBJECT, "VM " + vm.getHostName() + "(" + vm.getInstanceName() + ") state is sync-ed (" + vm.getState() - + " -> Stopped) from out-of-context transition."); + VM_SYNC_ALERT_SUBJECT, String.format("VM %s(%s) state is sync-ed (%s -> Stopped) from out-of-context transition.", + vm.getHostName(), vm, vm.getState())); - logger.info("VM {} is sync-ed to at Stopped state according to power-off report from hypervisor.", vm.getInstanceName()); + logger.info("VM {} is sync-ed to at Stopped state according to power-off report from hypervisor.", vm); break; @@ -4983,8 +4983,8 @@ private 
void scanStalledVMInTransitionStateOnDisconnectedHosts() { final VMInstanceVO vm = _vmDao.findById(vmId); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_SYNC, vm.getDataCenterId(), vm.getPodIdToDeployIn(), - VM_SYNC_ALERT_SUBJECT, "VM " + vm.getHostName() + "(" + vm.getInstanceName() + ") is stuck in " + vm.getState() - + " state and its host is unreachable for too long"); + VM_SYNC_ALERT_SUBJECT, String.format("VM %s(%s) is stuck in %s state and its host is unreachable for too long", + vm.getHostName(), vm, vm.getState())); } } @@ -5502,7 +5502,7 @@ private Pair orchestrateMigrateAway(final VmWorkMigrateA try { orchestrateMigrateAway(vm.getUuid(), work.getSrcHostId(), null); } catch (final InsufficientServerCapacityException e) { - logger.warn("Failed to deploy vm {} with original planner, sending HAPlanner.", vm.getId(), e); + logger.warn("Failed to deploy vm {} with original planner, sending HAPlanner.", vm, e); orchestrateMigrateAway(vm.getUuid(), work.getSrcHostId(), _haMgr.getHAPlanner()); } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineClusterVO.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineClusterVO.java index c00d939b3dfd..db90ce0287ec 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineClusterVO.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineClusterVO.java @@ -29,6 +29,7 @@ import org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceEntity.State.Event; import org.apache.cloudstack.util.CPUArchConverter; import org.apache.cloudstack.util.HypervisorTypeConverter; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import javax.persistence.Convert; @@ -264,4 +265,11 @@ public void setArch(CPU.CPUArch arch) { public PartitionType 
partitionType() { return PartitionType.Cluster; } + + @Override + public String toString() { + return String.format("EngineCluster %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "name", "uuid")); + } } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineDataCenterVO.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineDataCenterVO.java index 57382530f40b..4691dd323042 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineDataCenterVO.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineDataCenterVO.java @@ -43,6 +43,7 @@ import com.cloud.utils.NumbersUtil; import com.cloud.utils.db.GenericDao; import com.cloud.utils.db.StateMachine; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "data_center") @@ -523,4 +524,11 @@ public PartitionType partitionType() { public DataCenter.Type getType() { return type; } + + @Override + public String toString() { + return String.format("EngineDataCenter %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "name", "uuid")); + } } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java index d804f079e174..7c94e9e28891 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java @@ -53,6 +53,7 @@ import com.cloud.utils.db.StateMachine; import org.apache.cloudstack.util.CPUArchConverter; import org.apache.cloudstack.util.HypervisorTypeConverter; +import 
org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "host") @@ -697,7 +698,9 @@ public boolean equals(Object obj) { @Override public String toString() { - return new StringBuilder("Host[").append("-").append(id).append("-").append(type).append("]").toString(); + return String.format("EngineHost %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "name", "uuid", "type")); } public void setHypervisorType(HypervisorType hypervisorType) { diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java index e1b798d16d60..f47abf658638 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java @@ -1248,18 +1248,18 @@ protected void configureNicProfileBasedOnRequestedIp(NicProfile requestedNicProf VlanVO vlanVo = _vlanDao.findByNetworkIdAndIpv4(network.getId(), requestedIpv4Address); if (vlanVo == null) { - throw new InvalidParameterValueException(String.format("Trying to configure a Nic with the requested [IPv4='%s'] but cannot find a Vlan for the [network id='%s']", - requestedIpv4Address, network.getId())); + throw new InvalidParameterValueException(String.format("Trying to configure a Nic with the requested [IPv4='%s'] but cannot find a Vlan for the [network '%s']", + requestedIpv4Address, network)); } String ipv4Gateway = vlanVo.getVlanGateway(); String ipv4Netmask = vlanVo.getVlanNetmask(); if (!NetUtils.isValidIp4(ipv4Gateway)) { - throw new InvalidParameterValueException(String.format("The [IPv4Gateway='%s'] from [VlanId='%s'] is not valid", ipv4Gateway, vlanVo.getId())); + throw new InvalidParameterValueException(String.format("The [IPv4Gateway='%s'] from 
[Vlan id=%d uuid=%s] is not valid", ipv4Gateway, vlanVo.getId(), vlanVo.getUuid())); } if (!NetUtils.isValidIp4Netmask(ipv4Netmask)) { - throw new InvalidParameterValueException(String.format("The [IPv4Netmask='%s'] from [VlanId='%s'] is not valid", ipv4Netmask, vlanVo.getId())); + throw new InvalidParameterValueException(String.format("The [IPv4Netmask='%s'] from [Vlan id=%d uuid=%s] is not valid", ipv4Netmask, vlanVo.getId(), vlanVo.getUuid())); } acquireLockAndCheckIfIpv4IsFree(network, requestedIpv4Address); @@ -1273,7 +1273,7 @@ protected void configureNicProfileBasedOnRequestedIp(NicProfile requestedNicProf String macAddress = _networkModel.getNextAvailableMacAddressInNetwork(network.getId()); nicProfile.setMacAddress(macAddress); } catch (InsufficientAddressCapacityException e) { - throw new CloudRuntimeException(String.format("Cannot get next available mac address in [network id='%s']", network.getId()), e); + throw new CloudRuntimeException(String.format("Cannot get next available mac address in [network %s]", network), e); } } } @@ -1285,7 +1285,7 @@ protected void acquireLockAndCheckIfIpv4IsFree(Network network, String requested IPAddressVO ipVO = _ipAddressDao.findByIpAndSourceNetworkId(network.getId(), requestedIpv4Address); if (ipVO == null) { throw new InvalidParameterValueException( - String.format("Cannot find IPAddressVO for guest [IPv4 address='%s'] and [network id='%s']", requestedIpv4Address, network.getId())); + String.format("Cannot find IPAddressVO for guest [IPv4 address='%s'] and [network %s]", requestedIpv4Address, network)); } try { IPAddressVO lockedIpVO = _ipAddressDao.acquireInLockTable(ipVO.getId()); @@ -1489,17 +1489,17 @@ private void setupPersistentNetwork(NetworkVO network, NetworkOfferingVO offerin final SetupPersistentNetworkAnswer answer = (SetupPersistentNetworkAnswer) _agentMgr.send(host.getId(), cmd); if (answer == null) { - logger.warn("Unable to get an answer to the SetupPersistentNetworkCommand from agent: {}", 
host.getId()); + logger.warn("Unable to get an answer to the SetupPersistentNetworkCommand from agent: {}", host); clusterToHostsMap.get(host.getClusterId()).remove(host.getId()); continue; } if (!answer.getResult()) { - logger.warn("Unable to setup agent {} due to {}", host.getId(), answer.getDetails()); + logger.warn("Unable to setup agent {} due to {}", host, answer.getDetails()); clusterToHostsMap.get(host.getClusterId()).remove(host.getId()); } } catch (Exception e) { - logger.warn("Failed to connect to host: {}", host.getName()); + logger.warn("Failed to connect to host: {}", host); } } if (clusterToHostsMap.keySet().size() != clusterVOs.size()) { @@ -1526,7 +1526,7 @@ public Pair implementNetwork(final long networkId, final NetworkVO network = _networksDao.findById(networkId); final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, network.getGuruName()); if (isNetworkImplemented(network)) { - logger.debug("Network id={} is already implemented", networkId); + logger.debug("Network {} is already implemented", network); implemented.set(guru, network); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_UPDATE, network.getAccountId(), network.getDataCenterId(), network.getId(), network.getName(), network.getNetworkOfferingId(), null, network.getState().name(), Network.class.getName(), network.getUuid(), true); @@ -1542,11 +1542,11 @@ public Pair implementNetwork(final long networkId, final throw ex; } - logger.debug("Lock is acquired for network id {} as a part of network implement", networkId); + logger.debug("Lock is acquired for network {} as a part of network implement", network); try { if (isNetworkImplemented(network)) { - logger.debug("Network id={} is already implemented", networkId); + logger.debug("Network {} is already implemented", network); implemented.set(guru, network); return implemented; } @@ -1618,7 +1618,7 @@ public Pair implementNetwork(final long networkId, final } _networksDao.releaseFromLockTable(networkId); - 
logger.debug("Lock is released for network id {} as a part of network implement", networkId); + logger.debug("Lock is released for network {} as a part of network implement", network); } } @@ -1743,57 +1743,57 @@ protected boolean reprogramNetworkRules(final long networkId, final Account call _firewallMgr.applyDefaultEgressFirewallRule(network.getId(), offering.isEgressDefaultPolicy(), true); } if (!_firewallMgr.applyFirewallRules(firewallEgressRulesToApply, false, caller)) { - logger.warn("Failed to reapply firewall Egress rule(s) as a part of network id={} restart", networkId); + logger.warn("Failed to reapply firewall Egress rule(s) as a part of network {} restart", network); success = false; } // associate all ip addresses if (!_ipAddrMgr.applyIpAssociations(network, false)) { - logger.warn("Failed to apply ip addresses as a part of network id {} restart", networkId); + logger.warn("Failed to apply ip addresses as a part of network {} restart", network); success = false; } // apply BGP settings if (!bgpService.applyBgpPeers(network, false)) { - logger.warn("Failed to apply bpg peers as a part of network id {} restart", networkId); + logger.warn("Failed to apply bgp peers as a part of network {} restart", network); success = false; } // apply static nat if (!_rulesMgr.applyStaticNatsForNetwork(networkId, false, caller)) { - logger.warn("Failed to apply static nats a part of network id {} restart", networkId); + logger.warn("Failed to apply static nats as a part of network {} restart", network); success = false; } // apply firewall rules final List firewallIngressRulesToApply = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Ingress); if (!_firewallMgr.applyFirewallRules(firewallIngressRulesToApply, false, caller)) { - logger.warn("Failed to reapply Ingress firewall rule(s) as a part of network id={} restart", networkId); + logger.warn("Failed to reapply Ingress firewall rule(s) as a part of network {} restart", 
network); success = false; } // apply port forwarding rules if (!_rulesMgr.applyPortForwardingRulesForNetwork(networkId, false, caller)) { - logger.warn("Failed to reapply port forwarding rule(s) as a part of network id={} restart", networkId); + logger.warn("Failed to reapply port forwarding rule(s) as a part of network {} restart", network); success = false; } // apply static nat rules if (!_rulesMgr.applyStaticNatRulesForNetwork(networkId, false, caller)) { - logger.warn("Failed to reapply static nat rule(s) as a part of network id={} restart", networkId); + logger.warn("Failed to reapply static nat rule(s) as a part of network {} restart", network); success = false; } // apply public load balancer rules if (!_lbMgr.applyLoadBalancersForNetwork(networkId, Scheme.Public)) { - logger.warn("Failed to reapply Public load balancer rules as a part of network id={} restart", networkId); + logger.warn("Failed to reapply Public load balancer rules as a part of network {} restart", network); success = false; } // apply internal load balancer rules if (!_lbMgr.applyLoadBalancersForNetwork(networkId, Scheme.Internal)) { - logger.warn("Failed to reapply internal load balancer rules as a part of network id={} restart", networkId); + logger.warn("Failed to reapply internal load balancer rules as a part of network {} restart", network); success = false; } @@ -1803,7 +1803,7 @@ protected boolean reprogramNetworkRules(final long networkId, final Account call for (final RemoteAccessVpn vpn : vpnsToReapply) { // Start remote access vpn per ip if (_vpnMgr.startRemoteAccessVpn(vpn.getServerAddressId(), false) == null) { - logger.warn("Failed to reapply vpn rules as a part of network id={} restart", networkId); + logger.warn("Failed to reapply vpn rules as a part of network {} restart", network); success = false; } } @@ -1811,7 +1811,7 @@ protected boolean reprogramNetworkRules(final long networkId, final Account call //apply network ACLs if 
(!_networkACLMgr.applyACLToNetwork(networkId)) { - logger.warn("Failed to reapply network ACLs as a part of of network id={}", networkId); + logger.warn("Failed to reapply network ACLs as a part of of network {}", network); success = false; } @@ -1922,13 +1922,13 @@ public void cleanupConfigForServicesInNetwork(List services, final Netwo long userId = User.UID_SYSTEM; //remove all PF/Static Nat rules for the network logger.info("Services: {} are no longer supported in network: {} after applying new network offering: {} removing the related configuration", - services, network.getUuid(), network.getNetworkOfferingId()); + services, network, network.getNetworkOfferingId()); if (services.contains(Service.StaticNat.getName()) || services.contains(Service.PortForwarding.getName())) { try { if (_rulesMgr.revokeAllPFStaticNatRulesForNetwork(networkId, userId, caller)) { - logger.debug("Successfully cleaned up portForwarding/staticNat rules for network id={}", networkId); + logger.debug("Successfully cleaned up portForwarding/staticNat rules for network {}", network); } else { - logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup", networkId); + logger.warn("Failed to release portForwarding/StaticNat rules as a part of network {} cleanup", network); } if (services.contains(Service.StaticNat.getName())) { //removing static nat configured on ips. 
@@ -1947,7 +1947,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { }); } } catch (ResourceUnavailableException ex) { - logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup due to resourceUnavailable", networkId, ex); + logger.warn("Failed to release portForwarding/StaticNat rules as a part of network {} cleanup due to resourceUnavailable", network, ex); } } if (services.contains(Service.SourceNat.getName())) { @@ -1966,9 +1966,9 @@ public void doInTransactionWithoutResult(TransactionStatus status) { if (services.contains(Service.Lb.getName())) { //remove all LB rules for the network if (_lbMgr.removeAllLoadBalanacersForNetwork(networkId, caller, userId)) { - logger.debug("Successfully cleaned up load balancing rules for network id={}", networkId); + logger.debug("Successfully cleaned up load balancing rules for network {}", network); } else { - logger.warn("Failed to cleanup LB rules as a part of network id={} cleanup", networkId); + logger.warn("Failed to cleanup LB rules as a part of network {} cleanup", network); } } @@ -1976,12 +1976,12 @@ public void doInTransactionWithoutResult(TransactionStatus status) { //revoke all firewall rules for the network try { if (_firewallMgr.revokeAllFirewallRulesForNetwork(networkId, userId, caller)) { - logger.debug("Successfully cleaned up firewallRules rules for network id={}", networkId); + logger.debug("Successfully cleaned up firewallRules rules for network {}", network); } else { - logger.warn("Failed to cleanup Firewall rules as a part of network id={} cleanup", networkId); + logger.warn("Failed to cleanup Firewall rules as a part of network {} cleanup", network); } } catch (ResourceUnavailableException ex) { - logger.warn("Failed to cleanup Firewall rules as a part of network id={} cleanup due to resourceUnavailable", networkId, ex); + logger.warn("Failed to cleanup Firewall rules as a part of network {} cleanup due to resourceUnavailable", network, 
ex); } } @@ -1991,7 +1991,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { try { _vpnMgr.destroyRemoteAccessVpnForIp(vpn.getServerAddressId(), caller, true); } catch (ResourceUnavailableException ex) { - logger.warn("Failed to cleanup remote access vpn resources of network: {} due to Exception: {}", network.getUuid(), ex); + logger.warn("Failed to cleanup remote access vpn resources of network: {} due to Exception: {}", network, ex); } } } @@ -2128,8 +2128,8 @@ public int compare(final NicVO nic1, final NicVO nic2) { for (final NicVO nic : nics) { final Pair implemented = implementNetwork(nic.getNetworkId(), dest, context, vmProfile.getVirtualMachine().getType() == Type.DomainRouter); if (implemented == null || implemented.first() == null) { - logger.warn("Failed to implement network id={} as a part of preparing nic id={}", nic.getNetworkId(), nic.getId()); - throw new CloudRuntimeException("Failed to implement network id=" + nic.getNetworkId() + " as a part preparing nic id=" + nic.getId()); + logger.warn("Failed to implement network id={} as a part of preparing nic {}", nic.getNetworkId(), nic); + throw new CloudRuntimeException(String.format("Failed to implement network id=%d as a part preparing nic %s", nic.getNetworkId(), nic)); } final NetworkVO network = implemented.second(); @@ -2324,7 +2324,7 @@ public void prepareAllNicsForMigration(final VirtualMachineProfile vm, final Dep if (nic == null && !addedURIs.contains(broadcastUri.toString())) { //Nic details are not available in DB //Create nic profile for migration - logger.debug("Creating nic profile for migration. BroadcastUri: {} NetworkId: {} VM: {}", broadcastUri.toString(), ntwkId, vm.getId()); + logger.debug("Creating nic profile for migration. 
BroadcastUri: {} NetworkId: {} VM: {}", broadcastUri.toString(), ntwkId, vm); final NetworkVO network = _networksDao.findById(ntwkId); final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, network.getGuruName()); final NicProfile profile = new NicProfile(); @@ -2513,7 +2513,7 @@ public Pair doInTransaction(final TransactionStatus status) @Override public void cleanupNics(final VirtualMachineProfile vm) { - logger.debug("Cleaning network for vm: {}", vm.getId()); + logger.debug("Cleaning network for vm: {}", vm); final List nics = _nicDao.listByVmId(vm.getId()); for (final NicVO nic : nics) { @@ -2610,7 +2610,7 @@ && isDhcpAccrossMultipleSubnetsSupported(dhcpServiceProvider)) { _nicDao.remove(nic.getId()); } - logger.debug("Removed nic id={}", nic.getId()); + logger.debug("Removed nic {}", nic); // release assigned IPv6 for Isolated Network VR NIC if (Type.User.equals(vm.getType()) && GuestType.Isolated.equals(network.getGuestType()) @@ -2623,7 +2623,7 @@ && isDhcpAccrossMultipleSubnetsSupported(dhcpServiceProvider)) { //remove the secondary ip addresses corresponding to this nic if (!removeVmSecondaryIpsOfNic(nic.getId())) { - logger.debug("Removing nic {} secondary ip addresses failed", nic.getId()); + logger.debug("Removing nic {} secondary ip addresses failed", nic); } } @@ -2837,16 +2837,21 @@ private Network createGuestNetwork(final long networkOfferingId, final String na } if (secondaryUri != null && !(bypassVlanOverlapCheck && ntwkOff.getGuestType() == GuestType.Shared) && _dcDao.findVnet(zoneId, pNtwk.getId(), BroadcastDomainType.getValue(secondaryUri)).size() > 0) { - throw new InvalidParameterValueException("The VLAN tag for isolated PVLAN " + isolatedPvlan + " is already being used for dynamic vlan allocation for the guest network in zone " - + zone.getName()); + throw new InvalidParameterValueException(String.format( + "The VLAN tag for isolated PVLAN %s is already being used for dynamic vlan allocation for the guest network in zone %s", + 
isolatedPvlan, zone)); } if (!UuidUtils.isUuid(vlanId)) { // For Isolated and L2 networks, don't allow to create network with vlan that already exists in the zone if (!hasGuestBypassVlanOverlapCheck(bypassVlanOverlapCheck, ntwkOff, isPrivateNetwork)) { if (_networksDao.listByZoneAndUriAndGuestType(zoneId, uri.toString(), null).size() > 0) { - throw new InvalidParameterValueException("Network with vlan " + vlanId + " already exists or overlaps with other network vlans in zone " + zoneId); + throw new InvalidParameterValueException(String.format( + "Network with vlan %s already exists or overlaps with other network vlans in zone %s", + vlanId, zone)); } else if (secondaryUri != null && _networksDao.listByZoneAndUriAndGuestType(zoneId, secondaryUri.toString(), null).size() > 0) { - throw new InvalidParameterValueException("Network with vlan " + isolatedPvlan + " already exists or overlaps with other network vlans in zone " + zoneId); + throw new InvalidParameterValueException(String.format( + "Network with vlan %s already exists or overlaps with other network vlans in zone %s", + isolatedPvlan, zone)); } else { final List dcVnets = _datacenterVnetDao.findVnet(zoneId, BroadcastDomainType.getValue(uri)); //for the network that is created as part of private gateway, @@ -2878,7 +2883,8 @@ private Network createGuestNetwork(final long networkOfferingId, final String na // don't allow to creating shared network with given Vlan ID, if there already exists a isolated network or // shared network with same Vlan ID in the zone if (!bypassVlanOverlapCheck && _networksDao.listByZoneAndUriAndGuestType(zoneId, uri.toString(), GuestType.Isolated).size() > 0) { - throw new InvalidParameterValueException("There is an existing isolated/shared network that overlaps with vlan id:" + vlanId + " in zone " + zoneId); + throw new InvalidParameterValueException(String.format( + "There is an existing isolated/shared network that overlaps with vlan id:%s in zone %s", vlanId, zone)); } } } @@ 
-2893,7 +2899,9 @@ private Network createGuestNetwork(final long networkOfferingId, final String na if (isUpdateDnsSupported == null || !Boolean.valueOf(isUpdateDnsSupported)) { if (networkDomain != null) { // TBD: NetworkOfferingId and zoneId. Send uuids instead. - throw new InvalidParameterValueException("Domain name change is not supported by network offering id=" + networkOfferingId + " in zone id=" + zoneId); + throw new InvalidParameterValueException(String.format( + "Domain name change is not supported by network offering id=%d in zone %s", + networkOfferingId, zone)); } } else { if (networkDomain == null) { @@ -3028,8 +3036,9 @@ public Network doInTransaction(final TransactionStatus status) { } if (_networksDao.listByPhysicalNetworkPvlan(physicalNetworkId, uri.toString()).size() > 0) { - throw new InvalidParameterValueException("Network with vlan " + vlanIdFinal + - " already exists or overlaps with other network pvlans in zone " + zoneId); + throw new InvalidParameterValueException(String.format( + "Network with vlan %s already exists or overlaps with other network pvlans in zone %s", + vlanIdFinal, zone)); } userNetwork.setBroadcastUri(uri); @@ -3044,9 +3053,9 @@ public Network doInTransaction(final TransactionStatus status) { } URI uri = NetUtils.generateUriForPvlan(vlanIdFinal, isolatedPvlan, isolatedPvlanType.toString()); if (_networksDao.listByPhysicalNetworkPvlan(physicalNetworkId, uri.toString(), isolatedPvlanType).size() > 0) { - throw new InvalidParameterValueException("Network with primary vlan " + vlanIdFinal + - " and secondary vlan " + isolatedPvlan + " type " + isolatedPvlanType + - " already exists or overlaps with other network pvlans in zone " + zoneId); + throw new InvalidParameterValueException(String.format( + "Network with primary vlan %s and secondary vlan %s type %s already exists or overlaps with other network pvlans in zone %s", + vlanIdFinal, isolatedPvlan, isolatedPvlanType, zone)); } userNetwork.setBroadcastUri(uri); 
userNetwork.setBroadcastDomainType(BroadcastDomainType.Pvlan); @@ -3189,7 +3198,7 @@ public Boolean doInTransaction(final TransactionStatus status) { boolean result = false; if (success) { - logger.debug("Network id={} is shutdown successfully, cleaning up corresponding resources now.", networkId); + logger.debug("Network {} is shutdown successfully, cleaning up corresponding resources now.", networkFinal); final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, networkFinal.getGuruName()); final NetworkProfile profile = convertNetworkToNetworkProfile(networkFinal.getId()); guru.shutdown(profile, _networkOfferingDao.findById(networkFinal.getNetworkOfferingId())); @@ -3257,7 +3266,7 @@ public boolean shutdownNetworkElementsAndResources(final ReservationContext cont } finally { // just warn the administrator that the network elements failed to shutdown if (!cleanupResult) { - logger.warn("Failed to cleanup network id={} resources as a part of shutdownNetwork", network.getId()); + logger.warn("Failed to cleanup network {} resources as a part of shutdownNetwork", network); } } @@ -3299,15 +3308,15 @@ private void cleanupPersistentnNetworkResources(NetworkVO network) { CleanupPersistentNetworkResourceCommand cmd = new CleanupPersistentNetworkResourceCommand(to); CleanupPersistentNetworkResourceAnswer answer = (CleanupPersistentNetworkResourceAnswer) _agentMgr.send(host.getId(), cmd); if (answer == null) { - logger.warn("Unable to get an answer to the CleanupPersistentNetworkResourceCommand from agent: {}", host.getId()); + logger.warn("Unable to get an answer to the CleanupPersistentNetworkResourceCommand from agent: {}", host); continue; } if (!answer.getResult()) { - logger.warn("Unable to setup agent {} due to {}", host.getId(), answer.getDetails()); + logger.warn("Unable to setup agent {} due to {}", host, answer.getDetails()); } } catch (Exception e) { - logger.warn("Failed to cleanup network resources on host: {}", host.getName()); + 
logger.warn("Failed to cleanup network resources on host: {}", host); } } } @@ -3337,7 +3346,7 @@ public boolean destroyNetwork(final long networkId, final ReservationContext con // Don't allow to delete network via api call when it has vms assigned to it final int nicCount = getActiveNicsInNetwork(networkId); if (nicCount > 0) { - logger.debug("The network id={} has active Nics, but shouldn't.", networkId); + logger.debug("The network {} has active Nics, but shouldn't.", network); // at this point we have already determined that there are no active user vms in network // if the op_networks table shows active nics, it's a bug in releasing nics updating op_networks _networksDao.changeActiveNicsBy(networkId, -1 * nicCount); @@ -3367,7 +3376,7 @@ public boolean destroyNetwork(final long networkId, final ReservationContext con boolean success = true; if (!cleanupNetworkResources(networkId, callerAccount, context.getCaller().getId())) { - logger.warn("Unable to delete network id={}: failed to cleanup network resources", networkId); + logger.warn("Unable to delete network {}: failed to cleanup network resources", network); return false; } @@ -3396,7 +3405,7 @@ public boolean destroyNetwork(final long networkId, final ReservationContext con } if (success) { - logger.debug("Network id={} is destroyed successfully, cleaning up corresponding resources now.", networkId); + logger.debug("Network {} is destroyed successfully, cleaning up corresponding resources now.", network); final NetworkVO networkFinal = network; try { @@ -3495,7 +3504,7 @@ protected Pair> deleteVlansInNetwork(final NetworkVO netwo for (final VlanVO vlan : publicVlans) { VlanVO vlanRange = _configMgr.deleteVlanAndPublicIpRange(userId, vlan.getId(), callerAccount); if (vlanRange == null) { - logger.warn("Failed to delete vlan " + vlan.getId() + ");"); + logger.warn("Failed to delete vlan [id: {}, uuid: {}];", vlan.getId(), vlan.getUuid()); result = false; } else { deletedPublicVlanRange.add(vlanRange); @@ 
-3505,16 +3514,16 @@ protected Pair> deleteVlansInNetwork(final NetworkVO netwo //cleanup private vlans final int privateIpAllocCount = _privateIpDao.countAllocatedByNetworkId(networkId); if (privateIpAllocCount > 0) { - logger.warn("Can't delete Private ip range for network {} as it has allocated ip addresses", networkId); + logger.warn("Can't delete Private ip range for network {} as it has allocated ip addresses", network); result = false; } else { _privateIpDao.deleteByNetworkId(networkId); - logger.debug("Deleted ip range for private network id={}", networkId); + logger.debug("Deleted ip range for private network {}", network); } // release vlans of user-shared networks without specifyvlan if (isSharedNetworkWithoutSpecifyVlan(_networkOfferingDao.findById(network.getNetworkOfferingId()))) { - logger.debug("Releasing vnet for the network id={}", network.getId()); + logger.debug("Releasing vnet for the network {}", network); _dcDao.releaseVnet(BroadcastDomainType.getValue(network.getBroadcastUri()), network.getDataCenterId(), network.getPhysicalNetworkId(), network.getAccountId(), network.getReservationId()); } @@ -3630,7 +3639,7 @@ public boolean restartNetwork(final Long networkId, final Account callerAccount, boolean restartRequired = false; final NetworkVO network = _networksDao.findById(networkId); - logger.debug("Restarting network {}...", networkId); + logger.debug("Restarting network {}...", network); final ReservationContext context = new ReservationContextImpl(null, null, callerUser, callerAccount); final NetworkOffering offering = _networkOfferingDao.findByIdIncludingRemoved(network.getNetworkOfferingId()); @@ -3985,51 +3994,51 @@ private boolean cleanupNetworkResources(final long networkId, final Account call //remove all PF/Static Nat rules for the network try { if (_rulesMgr.revokeAllPFStaticNatRulesForNetwork(networkId, callerUserId, caller)) { - logger.debug("Successfully cleaned up portForwarding/staticNat rules for network id={}", networkId); + 
logger.debug("Successfully cleaned up portForwarding/staticNat rules for network {}", network); } else { success = false; - logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup", networkId); + logger.warn("Failed to release portForwarding/StaticNat rules as a part of network {} cleanup", network); } } catch (final ResourceUnavailableException ex) { success = false; // shouldn't even come here as network is being cleaned up after all network elements are shutdown - logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup due to resourceUnavailable", networkId, ex); + logger.warn("Failed to release portForwarding/StaticNat rules as a part of network {} cleanup due to resourceUnavailable", network, ex); } //remove all LB rules for the network if (_lbMgr.removeAllLoadBalanacersForNetwork(networkId, caller, callerUserId)) { - logger.debug("Successfully cleaned up load balancing rules for network id={}", networkId); + logger.debug("Successfully cleaned up load balancing rules for network {}", network); } else { // shouldn't even come here as network is being cleaned up after all network elements are shutdown success = false; - logger.warn("Failed to cleanup LB rules as a part of network id={} cleanup", networkId); + logger.warn("Failed to cleanup LB rules as a part of network {} cleanup", network); } //revoke all firewall rules for the network try { if (_firewallMgr.revokeAllFirewallRulesForNetwork(networkId, callerUserId, caller)) { - logger.debug("Successfully cleaned up firewallRules rules for network id={}", networkId); + logger.debug("Successfully cleaned up firewallRules rules for network {}", network); } else { success = false; - logger.warn("Failed to cleanup Firewall rules as a part of network id={} cleanup", networkId); + logger.warn("Failed to cleanup Firewall rules as a part of network {} cleanup", network); } } catch (final ResourceUnavailableException ex) { success = false; 
// shouldn't even come here as network is being cleaned up after all network elements are shutdown - logger.warn("Failed to cleanup Firewall rules as a part of network id={} cleanup due to resourceUnavailable", networkId, ex); + logger.warn("Failed to cleanup Firewall rules as a part of network {} cleanup due to resourceUnavailable", network, ex); } //revoke all network ACLs for network try { if (_networkACLMgr.revokeACLItemsForNetwork(networkId)) { - logger.debug("Successfully cleaned up NetworkACLs for network id={}", networkId); + logger.debug("Successfully cleaned up NetworkACLs for network {}", network); } else { success = false; - logger.warn("Failed to cleanup NetworkACLs as a part of network id={} cleanup", networkId); + logger.warn("Failed to cleanup NetworkACLs as a part of network {} cleanup", network); } } catch (final ResourceUnavailableException ex) { success = false; - logger.warn("Failed to cleanup Network ACLs as a part of network id={} cleanup due to resourceUnavailable ", networkId, ex); + logger.warn("Failed to cleanup Network ACLs as a part of network {} cleanup due to resourceUnavailable ", network, ex); } //release all ip addresses @@ -4072,7 +4081,7 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal // Mark all PF rules as revoked and apply them on the backend (not in the DB) final List pfRules = _portForwardingRulesDao.listByNetwork(networkId); - logger.debug("Releasing {} port forwarding rules for network id={} as a part of shutdownNetworkRules.", pfRules.size(), networkId); + logger.debug("Releasing {} port forwarding rules for network {} as a part of shutdownNetworkRules.", pfRules.size(), network); for (final PortForwardingRuleVO pfRule : pfRules) { logger.trace("Marking pf rule {} with Revoke state", pfRule); @@ -4092,7 +4101,7 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal // Mark all static rules as revoked and apply them on the backend (not in the DB) final
List firewallStaticNatRules = _firewallDao.listByNetworkAndPurpose(networkId, Purpose.StaticNat); final List staticNatRules = new ArrayList(); - logger.debug("Releasing {} static nat rules for network id={} as a part of shutdownNetworkRules", firewallStaticNatRules.size(), networkId); + logger.debug("Releasing {} static nat rules for network {} as a part of shutdownNetworkRules", firewallStaticNatRules.size(), network); for (final FirewallRuleVO firewallStaticNatRule : firewallStaticNatRules) { logger.trace("Marking static nat rule {} with Revoke state", firewallStaticNatRule); @@ -4100,7 +4109,7 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal final FirewallRuleVO ruleVO = _firewallDao.findById(firewallStaticNatRule.getId()); if (ip == null || !ip.isOneToOneNat() || ip.getAssociatedWithVmId() == null) { - throw new InvalidParameterValueException("Source ip address of the rule id=" + firewallStaticNatRule.getId() + " is not static nat enabled"); + throw new InvalidParameterValueException(String.format("Source ip address of the rule %s is not static nat enabled", firewallStaticNatRule)); } //String dstIp = _networkModel.getIpInNetwork(ip.getAssociatedWithVmId(), firewallStaticNatRule.getNetworkId()); @@ -4140,7 +4149,7 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal // revoke all firewall rules for the network w/o applying them on the DB final List firewallRules = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Ingress); - logger.debug("Releasing firewall ingress rules for network id={} as a part of shutdownNetworkRules", firewallRules.size(), networkId); + logger.debug("Releasing {} firewall ingress rules for network {} as a part of shutdownNetworkRules", firewallRules.size(), network); for (final FirewallRuleVO firewallRule : firewallRules) { logger.trace("Marking firewall ingress rule {} with Revoke state", firewallRule); @@ -4158,7 +4167,7 @@
private boolean shutdownNetworkResources(final long networkId, final Account cal } final List firewallEgressRules = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Egress); - logger.debug("Releasing {} firewall egress rules for network id={} as a part of shutdownNetworkRules", firewallEgressRules.size(), networkId); + logger.debug("Releasing {} firewall egress rules for network {} as a part of shutdownNetworkRules", firewallEgressRules.size(), network); try { // delete default egress rule @@ -4190,7 +4199,7 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal } if (network.getVpcId() != null) { - logger.debug("Releasing Network ACL Items for network id={} as a part of shutdownNetworkRules", networkId); + logger.debug("Releasing Network ACL Items for network {} as a part of shutdownNetworkRules", network); try { //revoke all Network ACLs for the network w/o applying them in the DB @@ -4207,7 +4216,7 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal //release all static nats for the network if (!_rulesMgr.applyStaticNatForNetwork(networkId, false, caller, true)) { - logger.warn("Failed to disable static nats as part of shutdownNetworkRules for network id {}", networkId); + logger.warn("Failed to disable static nats as part of shutdownNetworkRules for network {}", network); success = false; } @@ -4310,12 +4319,12 @@ public void processConnect(final Host host, final StartupCommand cmd, final bool final CheckNetworkAnswer answer = (CheckNetworkAnswer) _agentMgr.easySend(hostId, nwCmd); if (answer == null) { - logger.warn("Unable to get an answer to the CheckNetworkCommand from agent: {}", host.getId()); - throw new ConnectionException(true, "Unable to get an answer to the CheckNetworkCommand from agent: " + host.getId()); + logger.warn("Unable to get an answer to the CheckNetworkCommand from agent: {}", host); + throw new ConnectionException(true, 
String.format("Unable to get an answer to the CheckNetworkCommand from agent: %s", host)); } if (!answer.getResult()) { - logger.warn("Unable to setup agent {} due to {}", hostId, answer.getDetails()); + logger.warn("Unable to setup agent {} due to {}", host, answer.getDetails()); final String msg = "Incorrect Network setup on agent, Reinitialize agent after network names are setup, details : " + answer.getDetails(); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, dcId, host.getPodId(), msg, msg); throw new ConnectionException(true, msg); @@ -4471,8 +4480,8 @@ public NicProfile createNicForVm(final Network network, final NicProfile request if (prepare) { final Pair implemented = implementNetwork(nic.getNetworkId(), dest, context, vmProfile.getVirtualMachine().getType() == Type.DomainRouter); if (implemented == null || implemented.first() == null) { - logger.warn("Failed to implement network id={} as a part of preparing nic id={}", nic.getNetworkId(), nic.getId()); - throw new CloudRuntimeException("Failed to implement network id=" + nic.getNetworkId() + " as a part preparing nic id=" + nic.getId()); + logger.warn("Failed to implement network id={} as a part of preparing nic {}", nic.getNetworkId(), nic); + throw new CloudRuntimeException(String.format("Failed to implement network id=%d as a part preparing nic %s", nic.getNetworkId(), nic)); } nic = prepareNic(vmProfile, dest, context, nic.getId(), implemented.second()); logger.debug("Nic is prepared successfully for vm {} in network {}", vm, network); @@ -4588,18 +4597,18 @@ protected List getElementForServiceInNetwork(final Network netwo final List providers = getProvidersForServiceInNetwork(network, service); //Only support one provider now if (providers == null) { - logger.error("Cannot find {} provider for network {}", service.getName(), network.getId()); + logger.error("Cannot find {} provider for network {}", service.getName(), network); return null; } if (providers.size() != 1 && service != 
Service.Lb) { //support more than one LB providers only - logger.error("Found {} {} providers for network! {}", providers.size(), service.getName(), network.getId()); + logger.error("Found {} {} providers for network! {}", providers.size(), service.getName(), network); return null; } for (final Provider provider : providers) { final NetworkElement element = _networkModel.getElementImplementingProvider(provider.getName()); - logger.info("Let {} handle {} in network {}", element.getName(), service.getName(), network.getId()); + logger.info("Let {} handle {} in network {}", element.getName(), service.getName(), network); elements.add(element); } return elements; @@ -4743,7 +4752,7 @@ public NicVO doInTransaction(TransactionStatus status) { int count = 1; if (vo.getVmType() == VirtualMachine.Type.User) { - logger.debug("Changing active number of nics for network id={} on {}", network.getUuid(), count); + logger.debug("Changing active number of nics for network {} on {}", network, count); _networksDao.changeActiveNicsBy(network.getId(), count); } if (vo.getVmType() == VirtualMachine.Type.User @@ -4807,16 +4816,16 @@ protected Pair getNetworkGatewayAndNetmaskForNicImport(Network n private String generateNewMacAddressIfForced(Network network, String macAddress, boolean forced) { if (!forced) { - throw new CloudRuntimeException("NIC with MAC address " + macAddress + " exists on network with ID " + network.getUuid() + + throw new CloudRuntimeException("NIC with MAC address " + macAddress + " exists on network " + network + " and forced flag is disabled"); } try { - logger.debug("Generating a new mac address on network {} as the mac address {} already exists", network.getName(), macAddress); + logger.debug("Generating a new mac address on network {} as the mac address {} already exists", network, macAddress); String newMacAddress = _networkModel.getNextAvailableMacAddressInNetwork(network.getId()); logger.debug("Successfully generated the mac address {}, using it instead of 
the conflicting address {}", newMacAddress, macAddress); return newMacAddress; } catch (InsufficientAddressCapacityException e) { - String msg = String.format("Could not generate a new mac address on network %s", network.getName()); + String msg = String.format("Could not generate a new mac address on network %s", network); logger.error(msg); throw new CloudRuntimeException(msg); } @@ -4824,7 +4833,7 @@ private String generateNewMacAddressIfForced(Network network, String macAddress, @Override public void unmanageNics(VirtualMachineProfile vm) { - logger.debug("Unmanaging NICs for VM: {}", vm.getId()); + logger.debug("Unmanaging NICs for VM: {}", vm); VirtualMachine virtualMachine = vm.getVirtualMachine(); final List nics = _nicDao.listByVmId(vm.getId()); diff --git a/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java b/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java index 4e50c95f24e3..b0b27933ccee 100644 --- a/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java +++ b/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java @@ -201,7 +201,8 @@ public void setUuid(String uuid) { @Override public String toString() { - return String.format("HostPod %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "name", "uuid")); + return String.format("HostPod %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "name", "uuid")); } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnConnectionVO.java b/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnConnectionVO.java index b032966dd5a2..4d6bee5c8614 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnConnectionVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnConnectionVO.java @@ -32,6 +32,7 @@ import com.cloud.network.Site2SiteVpnConnection; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; 
@Entity @@ -182,4 +183,11 @@ public Class getEntityType() { public String getName() { return null; } + + @Override + public String toString() { + return String.format("Site2SiteVpnConnection %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "state")); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/rules/FirewallRuleVO.java b/engine/schema/src/main/java/com/cloud/network/rules/FirewallRuleVO.java index 07b25e7a28ca..2a4c19fb11dc 100644 --- a/engine/schema/src/main/java/com/cloud/network/rules/FirewallRuleVO.java +++ b/engine/schema/src/main/java/com/cloud/network/rules/FirewallRuleVO.java @@ -36,6 +36,7 @@ import com.cloud.utils.db.GenericDao; import com.cloud.utils.net.NetUtils; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "firewall_rules") @@ -258,7 +259,9 @@ public FirewallRuleVO(String xId, Long ipAddressId, Integer portStart, Integer p @Override public String toString() { - return new StringBuilder("Rule[").append(id).append("-").append(purpose).append("-").append(state).append("]").toString(); + return String.format("FirewallRule %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "purpose", "state")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java b/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java index 27d8227284b1..e8ccc2ebcf1c 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java @@ -28,6 +28,7 @@ import javax.persistence.Transient; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "vpc") @@ -210,8 +211,9 @@ public void setDisplayText(final String displayText) { @Override public String toString() { - final StringBuilder buf = new StringBuilder("[VPC 
["); - return buf.append(id).append("-").append(name).append("]").toString(); + return String.format("VPC %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java b/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java index 9dc9734f8ab3..10d08601515b 100644 --- a/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java @@ -572,7 +572,9 @@ public int hashCode() { @Override public String toString() { - return String.format("Template %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uniqueName", "format")); + return String.format("Template %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "uniqueName", "format")); } public void setRemoved(Date removed) { diff --git a/engine/schema/src/main/java/com/cloud/vm/NicVO.java b/engine/schema/src/main/java/com/cloud/vm/NicVO.java index 936efd112b74..6c569e22dd95 100644 --- a/engine/schema/src/main/java/com/cloud/vm/NicVO.java +++ b/engine/schema/src/main/java/com/cloud/vm/NicVO.java @@ -330,7 +330,10 @@ public void setCreated(Date created) { @Override public String toString() { - return String.format("Nic %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "instanceId", "deviceId", "broadcastUri", "reservationId", "iPv4Address")); + return String.format("Nic %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "instanceId", + "deviceId", "broadcastUri", "reservationId", "iPv4Address")); } @Override diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreVO.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreVO.java index d660960b713c..8be37df0d5b4 100644 --- 
a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreVO.java @@ -219,7 +219,7 @@ public void setUsedBytes(Long usedBytes) { @Override public String toString() { - return String.format("ImageStoreVO %s", + return String.format("ImageStore %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( this, "id", "name", "uuid")); } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java index c6d9fab5f17a..829f2c270f57 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java @@ -103,7 +103,7 @@ public boolean hostAdded(long hostId) { private boolean createPersistentNetworkResourcesOnHost(long hostId) { HostVO host = hostDao.findById(hostId); if (host == null) { - logger.warn(String.format("Host with id %ld can't be found", hostId)); + logger.warn("Host with id {} can't be found", hostId); return false; } setupPersistentNetwork(host); @@ -134,32 +134,31 @@ public boolean hostConnect(long hostId, long poolId) throws StorageConflictExcep ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool, nfsMountOpts.first()); cmd.setWait(modifyStoragePoolCommandWait); - logger.debug(String.format("Sending modify storage pool command to agent: %d for storage pool: %d with timeout %d seconds", - hostId, poolId, cmd.getWait())); + logger.debug("Sending modify storage pool command to agent: {} for storage pool: {} with timeout {} seconds", hostId, pool, cmd.getWait()); final Answer answer = agentMgr.easySend(hostId, cmd); if (answer == null) { - throw new CloudRuntimeException("Unable to get 
an answer to the modify storage pool command" + pool.getId()); + throw new CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command %s", pool)); } if (!answer.getResult()) { - String msg = "Unable to attach storage pool" + poolId + " to the host" + hostId; + String msg = String.format("Unable to attach storage pool %s to the host %d", pool, hostId); alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, pool.getDataCenterId(), pool.getPodId(), msg, msg); - throw new CloudRuntimeException("Unable to establish connection from storage head to storage pool " + pool.getId() + " due to " + answer.getDetails() + - pool.getId()); + throw new CloudRuntimeException(String.format("Unable to establish connection from storage head to storage pool %s due to %s", + pool, answer.getDetails())); } - assert (answer instanceof ModifyStoragePoolAnswer) : "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? Pool=" + - pool.getId() + "Host=" + hostId; + assert (answer instanceof ModifyStoragePoolAnswer) : String.format( "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand?
Pool=%sHost=%d", pool, hostId); ModifyStoragePoolAnswer mspAnswer = (ModifyStoragePoolAnswer) answer; if (mspAnswer.getLocalDatastoreName() != null && pool.isShared()) { String datastoreName = mspAnswer.getLocalDatastoreName(); List localStoragePools = this.primaryStoreDao.listLocalStoragePoolByPath(pool.getDataCenterId(), datastoreName); for (StoragePoolVO localStoragePool : localStoragePools) { if (datastoreName.equals(localStoragePool.getPath())) { - logger.warn("Storage pool: " + pool.getId() + " has already been added as local storage: " + localStoragePool.getName()); - throw new StorageConflictException("Cannot add shared storage pool: " + pool.getId() + " because it has already been added as local storage:" - + localStoragePool.getName()); + logger.warn("Storage pool: {} has already been added as local storage: {}", pool, localStoragePool); + throw new StorageConflictException(String.format( + "Cannot add shared storage pool: %s because it has already been added as local storage: %s", pool, localStoragePool)); } } } @@ -222,12 +221,11 @@ public boolean hostAboutToBeRemoved(long hostId) { new CleanupPersistentNetworkResourceCommand(createNicTOFromNetworkAndOffering(persistentNetworkVO, networkOfferingVO, host)); Answer answer = agentMgr.easySend(hostId, cleanupCmd); if (answer == null) { - logger.error("Unable to get answer to the cleanup persistent network command " + persistentNetworkVO.getId()); + logger.error("Unable to get answer to the cleanup persistent network command {}", persistentNetworkVO); continue; } if (!answer.getResult()) { - String msg = String.format("Unable to cleanup persistent network resources from network %d on the host %d", persistentNetworkVO.getId(), hostId); - logger.error(msg); + logger.error("Unable to cleanup persistent network resources from network {} on the host {}", persistentNetworkVO, hostId); } } return true; @@ -258,11 +256,11 @@ private void setupPersistentNetwork(HostVO host) { new 
SetupPersistentNetworkCommand(createNicTOFromNetworkAndOffering(networkVO, networkOfferingVO, host)); Answer answer = agentMgr.easySend(host.getId(), persistentNetworkCommand); if (answer == null) { - throw new CloudRuntimeException("Unable to get answer to the setup persistent network command " + networkVO.getId()); + throw new CloudRuntimeException(String.format("Unable to get answer to the setup persistent network command %s", networkVO)); } if (!answer.getResult()) { - String msg = String.format("Unable to create persistent network resources for network %d on the host %d in zone %d", networkVO.getId(), host.getId(), networkVO.getDataCenterId()); - logger.error(msg); + logger.error("Unable to create persistent network resources for network {} on the host {} in zone {}", + networkVO, host, networkVO.getDataCenterId()); } } } diff --git a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/provider/DateraHostListener.java b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/provider/DateraHostListener.java index 89ac2a9a21c3..8707ff2ee8d7 100644 --- a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/provider/DateraHostListener.java +++ b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/provider/DateraHostListener.java @@ -86,7 +86,7 @@ public boolean hostConnect(long hostId, long storagePoolId) { HostVO host = _hostDao.findById(hostId); if (host == null) { - logger.error("Failed to add host by HostListener as host was not found with id : " + hostId); + logger.error("Failed to add host by HostListener as host was not found with id : {}", hostId); return false; } StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(storagePoolId, hostId); @@ -280,9 +280,9 @@ private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { } if (!answer.getResult()) { - String msg = "Unable to modify targets on the
following host: " + hostId; - HostVO host = _hostDao.findById(hostId); + HostVO host = _hostDao.findById(hostId); + String msg = String.format("Unable to modify targets on the following host: %s", host); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), msg, msg); @@ -294,21 +294,22 @@ private void sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StorageP Answer answer = _agentMgr.easySend(hostId, cmd); if (answer == null) { - throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command (" + storagePool.getId() + ")"); + throw new CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command (%s)", storagePool)); } if (!answer.getResult()) { - String msg = "Unable to attach storage pool " + storagePool.getId() + " to host " + hostId; + String msg = String.format("Unable to attach storage pool %s to host %d", storagePool, hostId); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg); - throw new CloudRuntimeException("Unable to establish a connection from agent to storage pool " + storagePool.getId() + " due to " + answer.getDetails() + - " (" + storagePool.getId() + ")"); + throw new CloudRuntimeException(String.format( "Unable to establish a connection from agent to storage pool %s due to %s (%d)", storagePool, answer.getDetails(), storagePool.getId())); } - assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer expected ; Pool = " + storagePool.getId() + " Host = " + hostId; + assert (answer instanceof ModifyStoragePoolAnswer) : String.format("ModifyStoragePoolAnswer expected ; Pool = %s Host = %d", storagePool, hostId); - logger.info("Connection established between storage pool " + storagePool + " and host + " + hostId); + logger.info("Connection established between storage pool {} and host {}", storagePool, hostId); } private List> getTargets(long clusterId, long storagePoolId) {
diff --git a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriverTest.java b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriverTest.java index 1b8fcee76bce..50a8ebf8aab8 100644 --- a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriverTest.java +++ b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriverTest.java @@ -386,7 +386,6 @@ public void revertBlockCopyVolumeOperationsOnDeleteSuccess() throws Exception{ when(srcData.getPath()).thenReturn("bec0ba7700000007:vol-11-6aef-10ee"); when(srcData.getFolder()).thenReturn("921c364500000007"); DataStore destStore = Mockito.mock(DataStore.class); - when(destStore.getId()).thenReturn(2L); when(destData.getDataStore()).thenReturn(destStore); doNothing().when(scaleIOPrimaryDataStoreDriver) .revokeAccess(any(), any(), any()); @@ -424,7 +423,6 @@ public void revertBlockCopyVolumeOperationsOnDeleteFailure() throws Exception{ when(srcData.getPath()).thenReturn(srcVolumePath); when(srcData.getFolder()).thenReturn("921c364500000007"); DataStore destStore = Mockito.mock(DataStore.class); - when(destStore.getId()).thenReturn(2L); when(destData.getDataStore()).thenReturn(destStore); doNothing().when(scaleIOPrimaryDataStoreDriver).revokeAccess(any(), any(), any()); @@ -473,11 +471,9 @@ public void deleteSourceVolumeFailureScenarioAfterSuccessfulBlockCopy() throws E VolumeInfo srcData = Mockito.mock(VolumeInfo.class); Host host = Mockito.mock(Host.class); - when(host.getId()).thenReturn(1L); String srcVolumePath = "bec0ba7700000007:vol-11-6aef-10ee"; DataStore srcStore = Mockito.mock(DataStore.class); - when(srcStore.getId()).thenReturn(1L); DataTO volumeTO = Mockito.mock(DataTO.class); when(srcData.getDataStore()).thenReturn(srcStore); 
when(srcData.getTO()).thenReturn(volumeTO); diff --git a/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java b/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java index 08f055ca3a35..1bf4c31ab1c5 100644 --- a/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java +++ b/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java @@ -280,7 +280,6 @@ public void doInTransactionWithoutResult(TransactionStatus status) { @Override public void allocateVmCapacity(VirtualMachine vm, final boolean fromLastHost) { - final long vmId = vm.getId(); final long hostId = vm.getHostId(); final HostVO host = _hostDao.findById(hostId); final long clusterId = host.getClusterId(); @@ -389,7 +388,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } if (!hostHasCapacity || !hostHasCpuCapability) { - throw new CloudRuntimeException("Host does not have enough capacity for vm " + vmId); + throw new CloudRuntimeException("Host does not have enough capacity for vm " + vm); } _capacityDao.update(capacityCpu.getId(), capacityCpu); diff --git a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java index cf202564a99f..a2399d653b94 100644 --- a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -49,7 +49,6 @@ import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; -import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; @@ -296,8 +295,7 @@ protected void avoidOtherClustersForDeploymentIfMigrationDisabled(VirtualMachine return; } final Long lastHostClusterId = lastHost.getClusterId(); - 
logger.warn(String.format("VM last host ID: %d belongs to zone ID: %s for which config - %s is false and storage migration would be needed for inter-cluster migration, therefore, adding all other clusters except ID: %d from this zone to avoid list", - lastHost.getId(), vm.getDataCenterId(), ConfigurationManagerImpl.MIGRATE_VM_ACROSS_CLUSTERS.key(), lastHostClusterId)); + logger.warn("VM last host: {} belongs to zone ID: {} for which config - {} is false and storage migration would be needed for inter-cluster migration, therefore, adding all other clusters except ID: {} from this zone to avoid list", lastHost, vm.getDataCenterId(), ConfigurationManagerImpl.MIGRATE_VM_ACROSS_CLUSTERS.key(), lastHostClusterId); List clusterIds = _clusterDao.listAllClusters(lastHost.getDataCenterId()); Set existingAvoidedClusters = avoids.getClustersToAvoid(); clusterIds = clusterIds.stream().filter(x -> !Objects.equals(x, lastHostClusterId) && (existingAvoidedClusters == null || !existingAvoidedClusters.contains(x))).collect(Collectors.toList()); @@ -317,20 +315,18 @@ public DeployDestination planDeployment(VirtualMachineProfile vmProfile, Deploym boolean volumesRequireEncryption = anyVolumeRequiresEncryption(_volsDao.findByInstance(vm.getId())); if (vm.getType() == VirtualMachine.Type.User || vm.getType() == VirtualMachine.Type.DomainRouter) { - logger.debug("Checking non dedicated resources to deploy VM [{}].", () -> ReflectionToStringBuilderUtils.reflectOnlySelectedFields(vm, "uuid", "type", "instanceName")); + logger.debug("Checking non dedicated resources to deploy VM [{}].", vm); checkForNonDedicatedResources(vmProfile, dc, avoids); } - logger.debug(() -> { - String datacenter = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(dc, "uuid", "name"); - String podVO = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(_podDao.findById(plan.getPodId()), "uuid", "name"); - String clusterVO =
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(_clusterDao.findById(plan.getClusterId()), "uuid", "name"); - String vmDetails = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(vm, "uuid", "type", "instanceName"); - return String.format("Trying to allocate a host and storage pools from datacenter [%s], pod [%s], cluster [%s], to deploy VM [%s] " - + "with requested CPU [%s] and requested RAM [%s].", datacenter, podVO, clusterVO, vmDetails, cpuRequested, toHumanReadableSize(ramRequested)); - }); + logger.debug("Trying to allocate a host and storage pools from datacenter [{}], " + + "pod [{}], cluster [{}], to deploy VM [{}] with requested CPU [{}] and requested RAM [{}].", + dc, _podDao.findById(plan.getPodId()), _clusterDao.findById(plan.getClusterId()), + vm, cpuRequested, toHumanReadableSize(ramRequested)); - logger.debug("ROOT volume [{}] {} to deploy VM [{}].", () -> getRootVolumeUuid(_volsDao.findByInstance(vm.getId())), () -> plan.getPoolId() != null ? "is ready" : "is not ready", vm::getUuid); + logger.debug("ROOT volume [{}] {} to deploy VM [{}].", + getRootVolume(_volsDao.findByInstance(vm.getId())), + plan.getPoolId() != null ? "is ready" : "is not ready", vm); avoidDisabledResources(vmProfile, dc, avoids); avoidDifferentArchResources(vmProfile, dc, avoids); @@ -351,8 +347,9 @@ public DeployDestination planDeployment(VirtualMachineProfile vmProfile, Deploym } } logger.debug("DeploymentPlan [{}] has not specified host. 
Trying to find another destination to deploy VM [{}], avoiding pods [{}], clusters [{}] and hosts [{}].", - () -> plan.getClass().getSimpleName(), vmProfile::getUuid, () -> StringUtils.join(avoids.getPodsToAvoid(), ", "), () -> StringUtils.join(avoids.getClustersToAvoid(), ", "), - () -> StringUtils.join(avoids.getHostsToAvoid(), ", ")); + plan.getClass().getSimpleName(), vmProfile, StringUtils.join(avoids.getPodsToAvoid(), ", "), + StringUtils.join(avoids.getClustersToAvoid(), ", "), + StringUtils.join(avoids.getHostsToAvoid(), ", ")); logger.debug("Deploy avoids pods: {}, clusters: {}, hosts: {}.", avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid()); @@ -364,7 +361,7 @@ public DeployDestination planDeployment(VirtualMachineProfile vmProfile, Deploym // check if datacenter is in avoid set if (avoids.shouldAvoid(dc)) { if (logger.isDebugEnabled()) { - logger.debug("DataCenter id = '" + dc.getId() + "' provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning."); + logger.debug("DataCenter = '" + dc + "' provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning."); } return null; } @@ -387,9 +384,8 @@ public DeployDestination planDeployment(VirtualMachineProfile vmProfile, Deploym boolean considerLastHost = vm.getLastHostId() != null && haVmTag == null && (considerLastHostStr == null || Boolean.TRUE.toString().equalsIgnoreCase(considerLastHostStr)); if (considerLastHost) { - logger.debug("This VM has last host_id specified, trying to choose the same host: " + vm.getLastHostId()); - HostVO host = _hostDao.findById(vm.getLastHostId()); + logger.debug("This VM has last host_id specified, trying to choose the same host: " + host); lastHost = host; DeployDestination deployDestination = deployInVmLastHost(vmProfile, plan, avoids, planner, vm, dc, offering, cpuRequested, ramRequested, volumesRequireEncryption); @@ -437,10 +433,10 @@ public DeployDestination planDeployment(VirtualMachineProfile vmProfile, 
Deploym avoids.addHost(dest.getHost().getId()); if (volumesRequireEncryption && !Boolean.parseBoolean(_hostDetailsDao.findDetail(hostId, Host.HOST_VOLUME_ENCRYPTION).getValue())) { - logger.warn(String.format("VM's volumes require encryption support, and the planner-provided host %s can't handle it", dest.getHost())); + logger.warn("VM's volumes require encryption support, and the planner-provided host {} can't handle it", dest.getHost()); continue; } else { - logger.debug(String.format("VM's volume encryption requirements are met by host %s", dest.getHost())); + logger.debug("VM's volume encryption requirements are met by host {}", dest.getHost()); } if (checkIfHostFitsPlannerUsage(hostId, DeploymentPlanner.PlannerResourceUsage.Shared)) { @@ -485,7 +481,7 @@ private DeployDestination deployInVmLastHost(VirtualMachineProfile vmProfile, De _hostDao.loadDetails(host); if (host.getStatus() != Status.Up) { logger.debug("Cannot deploy VM [{}] to the last host [{}] because this host is not in UP state or is not enabled. Host current status [{}] and resource status [{}].", - vm.getUuid(), host.getUuid(), host.getState().name(), host.getResourceState()); + vm, host, host.getState().name(), host.getResourceState()); return null; } if (checkVmProfileAndHost(vmProfile, host)) { @@ -509,14 +505,15 @@ private DeployDestination deployInVmLastHost(VirtualMachineProfile vmProfile, De boolean displayStorage = getDisplayStorageFromVmProfile(vmProfile); if (!hostHasCapacity || !hostHasCpuCapability) { - logger.debug("Cannot deploy VM [{}] to the last host [{}] because this host does not have enough capacity to deploy this VM.", vm.getUuid(), host.getUuid()); + logger.debug("Cannot deploy VM [{}] to the last host [{}] because this host does not have enough capacity to deploy this VM.", vm, host); return null; } - logger.debug("Last host [{}] of VM [{}] is UP and has enough capacity. 
Checking for suitable pools for this host under zone [{}], pod [{}] and cluster [{}].", - host.getUuid(), vm.getUuid(), host.getDataCenterId(), host.getPodId(), host.getClusterId()); - Pod pod = _podDao.findById(host.getPodId()); Cluster cluster = _clusterDao.findById(host.getClusterId()); + + logger.debug("Last host [{}] of VM [{}] is UP and has enough capacity. Checking for suitable pools for this host under zone [{}], pod [{}] and cluster [{}].", + host, vm, dc, pod, cluster); + if (vm.getHypervisorType() == HypervisorType.BareMetal) { DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<>(), displayStorage); logger.debug("Returning Deployment Destination: {}.", dest); @@ -536,7 +533,7 @@ private DeployDestination deployInVmLastHost(VirtualMachineProfile vmProfile, De // choose the potential pool for this VM for this // host if (suitableVolumeStoragePools.isEmpty()) { - logger.debug("Cannot find suitable storage pools in host [{}] to deploy VM [{}]", host.getUuid(), vm.getUuid()); + logger.debug("Cannot find suitable storage pools in host [{}] to deploy VM [{}]", host, vm); return null; } List suitableHosts = new ArrayList<>(); @@ -565,24 +562,23 @@ private DeployDestination deployInVmLastHost(VirtualMachineProfile vmProfile, De private boolean canUseLastHost(HostVO host, ExcludeList avoids, DeploymentPlan plan, VirtualMachine vm, ServiceOffering offering, boolean volumesRequireEncryption) { if (host == null) { - logger.warn("Could not find last host of VM [{}] with id [{}]. Skipping this and trying other available hosts.", vm.getUuid(), vm.getLastHostId()); + logger.warn("Could not find last host of VM [{}] with id [{}]. Skipping this and trying other available hosts.", vm, vm.getLastHostId()); return false; } if (avoids.shouldAvoid(host)) { - logger.warn("The last host [{}] of VM [{}] is in the avoid set. 
Skipping this and trying other available hosts.", host.getUuid(), vm.getUuid()); + logger.warn("The last host [{}] of VM [{}] is in the avoid set. Skipping this and trying other available hosts.", host, vm); return false; } if (plan.getClusterId() != null && host.getClusterId() != null && !plan.getClusterId().equals(host.getClusterId())) { - logger.debug(() -> String.format("The last host [%s] of VM [%s] cannot be picked, as the plan [%s] specifies a different cluster [%s] to deploy this VM. Skipping this and trying other available hosts.", - ReflectionToStringBuilderUtils.reflectOnlySelectedFields(host, "uuid", "clusterId"), vm.getUuid(), plan.getClass().getSimpleName(), plan.getClusterId())); + logger.debug("The last host [{}] of VM [{}] cannot be picked, as the plan [{}] specifies a different cluster [{}] to deploy this VM. Skipping this and trying other available hosts.", host, vm, plan.getClass().getSimpleName(), plan.getClusterId()); return false; } if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) { logger.debug("Cannot deploy VM [{}] in the last host [{}] because this host already has the max number of running VMs (users and system VMs). Skipping this and trying other available hosts.", - vm.getUuid(), host.getUuid()); + vm, host); return false; } @@ -590,7 +586,7 @@ private boolean canUseLastHost(HostVO host, ExcludeList avoids, DeploymentPlan p ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.pciDevice.toString()); if (offeringDetails != null && !_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())) { logger.debug("Cannot deploy VM [{}] in the last host [{}] because this host does not have the required GPU devices available. 
Skipping this and trying other available hosts.", - vm.getUuid(), host.getUuid()); + vm, host); return false; } @@ -605,31 +601,31 @@ private DeployDestination deployInSpecifiedHostWithoutHA(VirtualMachineProfile v DeploymentPlanner planner, VirtualMachine vm, DataCenter dc, String uefiFlag) throws InsufficientServerCapacityException { Long hostIdSpecified = plan.getHostId(); - logger.debug("DeploymentPlan [{}] has specified host [{}] without HA flag. Choosing this host to deploy VM [{}].", plan.getClass().getSimpleName(), hostIdSpecified, vm.getUuid()); + logger.debug("DeploymentPlan [{}] has specified host [{}] without HA flag. Choosing this host to deploy VM [{}].", plan.getClass().getSimpleName(), hostIdSpecified, vm); HostVO host = _hostDao.findById(hostIdSpecified); if (host != null && StringUtils.isNotBlank(uefiFlag) && "yes".equalsIgnoreCase(uefiFlag)) { _hostDao.loadDetails(host); if (MapUtils.isNotEmpty(host.getDetails()) && host.getDetails().containsKey(Host.HOST_UEFI_ENABLE) && "false".equalsIgnoreCase(host.getDetails().get(Host.HOST_UEFI_ENABLE))) { - logger.debug("Cannot deploy VM [{}] to specified host [{}] because this host does not support UEFI VM deployment, returning.", vm.getUuid(), host.getUuid()); + logger.debug("Cannot deploy VM [{}] to specified host [{}] because this host does not support UEFI VM deployment, returning.", vm, host); return null; } } if (host == null) { - logger.debug("Cannot deploy VM [{}] to host [{}] because this host cannot be found.", vm.getUuid(), hostIdSpecified); + logger.debug("Cannot deploy VM [{}] to host [{}] because this host cannot be found.", vm, hostIdSpecified); return null; } if (avoids.shouldAvoid(host)) { - logger.debug("Cannot deploy VM [{}] to host [{}] because this host is in the avoid set.", vm.getUuid(), host.getUuid()); + logger.debug("Cannot deploy VM [{}] to host [{}] because this host is in the avoid set.", vm, host); return null; } - logger.debug("Trying to find suitable pools for host [{}] under 
pod [{}], cluster [{}] and zone [{}], to deploy VM [{}].", - host.getUuid(), host.getDataCenterId(), host.getPodId(), host.getClusterId(), vm.getUuid()); - Pod pod = _podDao.findById(host.getPodId()); Cluster cluster = _clusterDao.findById(host.getClusterId()); + logger.debug("Trying to find suitable pools for host [{}] under pod [{}], cluster [{}] and zone [{}], to deploy VM [{}].", + host, pod, cluster, dc, vm); + boolean displayStorage = getDisplayStorageFromVmProfile(vmProfile); if (vm.getHypervisorType() == HypervisorType.BareMetal) { DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<>(), @@ -653,8 +649,6 @@ private DeployDestination deployInSpecifiedHostWithoutHA(VirtualMachineProfile v suitableVolumeStoragePools, avoids, getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes, plan.getPreferredHosts(), vm); if (potentialResources != null) { - pod = _podDao.findById(host.getPodId()); - cluster = _clusterDao.findById(host.getClusterId()); Map storageVolMap = potentialResources.second(); for (Volume vol : readyAndReusedVolumes) { storageVolMap.remove(vol); @@ -664,14 +658,14 @@ suitableVolumeStoragePools, avoids, getPlannerUsage(planner, vmProfile, plan, av return dest; } } - logger.debug("Cannot deploy VM [{}] under host [{}], because no suitable pools were found.", vmProfile.getUuid(), host.getUuid()); + logger.debug("Cannot deploy VM [{}] under host [{}], because no suitable pools were found.", vmProfile, host); return null; } - protected String getRootVolumeUuid(List volumes) { + protected Volume getRootVolume(List volumes) { for (Volume volume : volumes) { if (volume.getVolumeType() == Volume.Type.ROOT) { - return volume.getUuid(); + return volume; } } return null; } @@ -743,8 +737,8 @@ protected boolean isAdminVmDeployableInDisabledResources() { */ protected void avoidDisabledHosts(DataCenter dc, ExcludeList avoids) { List disabledHosts = _hostDao.listDisabledByDataCenterId(dc.getId()); - logger.debug(() -> 
String.format("Adding hosts [%s] of datacenter [%s] to the avoid set, because these hosts are in the Disabled state.", - disabledHosts.stream().map(HostVO::getUuid).collect(Collectors.joining(", ")), dc.getUuid())); + logger.debug("Adding hosts [{}] of datacenter [{}] to the avoid set, because these hosts are in the Disabled state.", + disabledHosts.stream().map(HostVO::getUuid).collect(Collectors.joining(", ")), dc); for (HostVO host : disabledHosts) { avoids.addHost(host.getId()); } @@ -757,7 +751,8 @@ protected void avoidDisabledClusters(DataCenter dc, ExcludeList avoids) { List pods = _podDao.listAllPods(dc.getId()); for (Long podId : pods) { List disabledClusters = _clusterDao.listDisabledClusters(dc.getId(), podId); - logger.debug(() -> String.format("Adding clusters [%s] of pod [%s] to the void set because these clusters are in the Disabled state.", StringUtils.join(disabledClusters, ", "), podId)); + logger.debug("Adding clusters [{}] of pod [{}] to the avoid set because these clusters are in the Disabled state.", + StringUtils.join(disabledClusters, ", "), podId); avoids.addClusterList(disabledClusters); } } @@ -767,7 +762,7 @@ protected void avoidDisabledClusters(DataCenter dc, ExcludeList avoids) { */ protected void avoidDisabledPods(DataCenter dc, ExcludeList avoids) { List disabledPods = _podDao.listDisabledPods(dc.getId()); - logger.debug(() -> String.format("Adding pods [%s] to the avoid set because these pods are in the Disabled state.", StringUtils.join(disabledPods, ", "))); + logger.debug("Adding pods [{}] to the avoid set because these pods are in the Disabled state.", StringUtils.join(disabledPods, ", ")); avoids.addPodList(disabledPods); } @@ -776,7 +771,7 @@ protected void avoidDisabledPods(DataCenter dc, ExcludeList avoids) { */ protected void avoidDisabledDataCenters(DataCenter dc, ExcludeList avoids) { if (dc.getAllocationState() == Grouping.AllocationState.Disabled) { - logger.debug("Adding datacenter [{}] to the avoid set because this 
datacenter is in Disabled state.", dc.getUuid()); + logger.debug("Adding datacenter [{}] to the avoid set because this datacenter is in Disabled state.", dc); avoids.addDataCenter(dc.getId()); } } @@ -831,7 +826,7 @@ public void checkForNonDedicatedResources(VirtualMachineProfile vmProfile, DataC long accountDomainId = vmProfile.getOwner().getDomainId(); long accountId = vmProfile.getOwner().getAccountId(); logger.debug("Zone [{}] is dedicated. Checking if account [{}] in domain [{}] can use this zone to deploy VM [{}].", - dedicatedZone.getUuid(), accountId, accountDomainId, vmProfile.getUuid()); + dedicatedZone.getUuid(), accountId, accountDomainId, vmProfile); // If a zone is dedicated to an account then all hosts in this zone // will be explicitly dedicated to @@ -842,14 +837,14 @@ public void checkForNonDedicatedResources(VirtualMachineProfile vmProfile, DataC if (dedicatedZone.getAccountId().equals(accountId)) { return; } else { - throw new CloudRuntimeException("Failed to deploy VM, Zone " + dc.getName() + " not available for the user account " + vmProfile.getOwner()); + throw new CloudRuntimeException("Failed to deploy VM, Zone " + dc + " not available for the user account " + vmProfile.getOwner()); } } // if zone is dedicated to a domain. 
Check owner's access to the // domain level dedication group if (!_affinityGroupService.isAffinityGroupAvailableInDomain(dedicatedZone.getAffinityGroupId(), accountDomainId)) { - throw new CloudRuntimeException("Failed to deploy VM, Zone " + dc.getName() + " not available for the user domain " + vmProfile.getOwner()); + throw new CloudRuntimeException("Failed to deploy VM, Zone " + dc + " not available for the user domain " + vmProfile.getOwner()); } } @@ -954,7 +949,7 @@ private void findAvoiSetForRouterVM(ExcludeList avoids, VirtualMachine vm, List< } logger.debug(() -> LogUtils.logGsonWithoutException("Adding pods [%s], clusters [%s] and hosts [%s] to the avoid list in the deploy process of VR VM [%s], " - + "because this VM is not dedicated to this components.", allPodsInDc, allClustersInDc, allHostsInDc, vm.getUuid())); + + "because this VM is not dedicated to this components.", allPodsInDc, allClustersInDc, allHostsInDc, vm)); avoids.addPodList(allPodsInDc); avoids.addClusterList(allClustersInDc); avoids.addHostList(allHostsInDc); @@ -962,7 +957,7 @@ private void findAvoiSetForRouterVM(ExcludeList avoids, VirtualMachine vm, List< private void findAvoidSetForNonExplicitUserVM(ExcludeList avoids, VirtualMachine vm, List allPodsInDc, List allClustersInDc, List allHostsInDc) { logger.debug(() -> LogUtils.logGsonWithoutException("Adding pods [%s], clusters [%s] and hosts [%s] to the avoid list in the deploy process of user VM [%s], " - + "because this VM is not explicitly dedicated to these components.", allPodsInDc, allClustersInDc, allHostsInDc, vm.getUuid())); + + "because this VM is not explicitly dedicated to these components.", allPodsInDc, allClustersInDc, allHostsInDc, vm)); avoids.addPodList(allPodsInDc); avoids.addClusterList(allClustersInDc); avoids.addHostList(allHostsInDc); @@ -1308,21 +1303,21 @@ private DeployDestination checkClustersforDestination(List clusterList, Vi if (clusterVO.getHypervisorType() != vmProfile.getHypervisorType()) { 
logger.debug("Adding cluster [{}] to the avoid set because the cluster's hypervisor [{}] does not match the VM [{}] hypervisor: [{}]. Skipping this cluster.", - clusterVO.getUuid(), clusterVO.getHypervisorType().name(), vmProfile.getUuid(), vmProfile.getHypervisorType().name()); + clusterVO, clusterVO.getHypervisorType().name(), vmProfile, vmProfile.getHypervisorType().name()); avoid.addCluster(clusterVO.getId()); continue; } - logger.debug("Checking resources in Cluster: " + clusterId + " under Pod: " + clusterVO.getPodId()); + Pod pod = _podDao.findById(clusterVO.getPodId()); + logger.debug("Checking resources in Cluster: " + clusterVO + " under Pod: " + pod); // search for resources(hosts and storage) under this zone, pod, // cluster. DataCenterDeployment potentialPlan = new DataCenterDeployment(plan.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null, plan.getPoolId(), null, plan.getReservationContext()); potentialPlan.setHostPriorities(plan.getHostPriorities()); - Pod pod = _podDao.findById(clusterVO.getPodId()); if (CollectionUtils.isNotEmpty(avoid.getPodsToAvoid()) && avoid.getPodsToAvoid().contains(pod.getId())) { - logger.debug("The cluster is in a disabled pod : " + pod.getId()); + logger.debug("The cluster is in a disabled pod : " + pod); } else { // find suitable hosts under this cluster, need as many hosts as we // get. 
@@ -1357,10 +1352,10 @@ private DeployDestination checkClustersforDestination(List clusterList, Vi return dest; } } else { - logger.debug("No suitable storagePools found under this Cluster: " + clusterId); + logger.debug("No suitable storagePools found under this Cluster: " + clusterVO); } } else { - logger.debug("No suitable hosts found under this Cluster: " + clusterId); + logger.debug("No suitable hosts found under this Cluster: " + clusterVO); } } @@ -1517,7 +1512,7 @@ public int compare(Volume v1, Volume v2) { } else { for (StoragePool pool : pools) { if (!suitablePools.contains(pool)) { - logger.debug("Storage pool " + pool.getUuid() + " not allowed for this VM"); + logger.debug("Storage pool " + pool + " not allowed for this VM"); notAllowedPools.add(pool); } } @@ -1546,7 +1541,7 @@ public int compare(Volume v1, Volume v2) { continue; } } catch (StorageUnavailableException e) { - logger.warn(String.format("Could not verify storage policy complaince against storage pool %s due to exception %s", storagePool.getUuid(), e.getMessage())); + logger.warn("Could not verify storage policy compliance against storage pool {} due to exception {}", storagePool, e.getMessage()); continue; } haveEnoughSpace = true; @@ -1554,7 +1549,7 @@ public int compare(Volume v1, Volume v2) { } if (hostCanAccessPool && haveEnoughSpace && hostAffinityCheck) { for (Volume vol : volumesOrderBySizeDesc) { - logger.debug("Found a suitable storage pool for all the VM volumes: " + storagePool.getUuid()); + logger.debug("Found a suitable storage pool for all the VM volumes: {}", storagePool); storage.put(vol, storagePool); } break; @@ -1563,7 +1558,7 @@ public int compare(Volume v1, Volume v2) { } else { for (Volume vol : volumesOrderBySizeDesc) { haveEnoughSpace = false; - logger.debug("Checking if host: " + potentialHost.getId() + " can access any suitable storage pool for volume: " + vol.getVolumeType()); + logger.debug("Checking if host: {} can access any suitable storage pool for volume: 
{}", potentialHost, vol.getVolumeType()); List volumePoolList = suitableVolumeStoragePools.get(vol); hostCanAccessPool = false; hostAffinityCheck = checkAffinity(potentialHost, preferredHosts); @@ -1585,7 +1580,7 @@ public int compare(Volume v1, Volume v2) { continue; } } catch (StorageUnavailableException e) { - logger.warn(String.format("Could not verify storage policy complaince against storage pool %s due to exception %s", potentialSPool.getUuid(), e.getMessage())); + logger.warn("Could not verify storage policy compliance against storage pool {} due to exception {}", potentialSPool, e.getMessage()); continue; } } @@ -1622,16 +1617,15 @@ public int compare(Volume v1, Volume v2) { boolean hostFitsPlannerUsage = checkIfHostFitsPlannerUsage(potentialHost.getId(), resourceUsageRequired); if (hostCanAccessPool && haveEnoughSpace && hostAffinityCheck && hostMeetsEncryptionRequirements && hostFitsPlannerUsage) { - logger.debug("Found a potential host " + "id: " + potentialHost.getId() + " name: " + potentialHost.getName() + - " and associated storage pools for this VM"); + logger.debug("Found a potential host {} and associated storage pools for this VM", potentialHost); volumeAllocationMap.clear(); return new Pair<>(potentialHost, storage); } else { logger.debug("Adding host [{}] to the avoid set because: can access Pool [{}], has enough space [{}], affinity check [{}], fits planner [{}] usage [{}].", - potentialHost.getUuid(), hostCanAccessPool, haveEnoughSpace, hostAffinityCheck, resourceUsageRequired.getClass().getSimpleName(), hostFitsPlannerUsage); + potentialHost, hostCanAccessPool, haveEnoughSpace, hostAffinityCheck, resourceUsageRequired.getClass().getSimpleName(), hostFitsPlannerUsage); if (!hostMeetsEncryptionRequirements) { - logger.debug("Potential host " + potentialHost + " did not meet encryption requirements of all volumes"); + logger.debug("Potential host {} did not meet encryption requirements of all volumes", potentialHost); } 
avoid.addHost(potentialHost.getId()); } @@ -1672,13 +1666,13 @@ protected boolean hostCanAccessSPool(Host host, StoragePool pool) { hostCanAccessSPool = true; } - logger.debug("Host: " + host.getId() + (hostCanAccessSPool ? " can" : " cannot") + " access pool: " + pool.getId()); + logger.debug("Host: {}{} access pool: {}", host, hostCanAccessSPool ? " can" : " cannot", pool); if (!hostCanAccessSPool) { if (_storageMgr.canHostPrepareStoragePoolAccess(host, pool)) { - logger.debug("Host: " + host.getId() + " can prepare access to pool: " + pool.getId()); + logger.debug("Host: {} can prepare access to pool: {}", host, pool); hostCanAccessSPool = true; } else { - logger.debug("Host: " + host.getId() + " cannot prepare access to pool: " + pool.getId()); + logger.debug("Host: {} cannot prepare access to pool: {}", host, pool); } } @@ -1705,7 +1699,7 @@ protected List findSuitableHosts(VirtualMachineProfile vmProfile, Deployme @Override public void reorderHostsByPriority(Map priorities, List hosts) { - logger.info("Re-ordering hosts " + hosts + " by priorities " + priorities); + logger.info("Re-ordering hosts {} by priorities {}", hosts, priorities); hosts.removeIf(host -> DataCenterDeployment.PROHIBITED_HOST_PRIORITY.equals(getHostPriority(priorities, host.getId()))); @@ -1718,7 +1712,7 @@ public int compare(Host host1, Host host2) { } ); - logger.info("Hosts after re-ordering are: " + hosts); + logger.info("Hosts after re-ordering are: {}", hosts); } private Integer getHostPriority(Map priorities, Long hostId) { @@ -1751,16 +1745,15 @@ protected Pair>, List> findSuitablePoolsFo Set poolsToAvoidOutput = new HashSet<>(originalAvoidPoolSet); for (VolumeVO toBeCreated : volumesTobeCreated) { - logger.debug("Checking suitable pools for volume [{}, {}] of VM [{}].", toBeCreated.getUuid(), toBeCreated.getVolumeType().name(), vmProfile.getUuid()); + logger.debug("Checking suitable pools for volume [{}, {}] of VM [{}].", toBeCreated, toBeCreated.getVolumeType().name(), 
vmProfile); if (toBeCreated.getState() == Volume.State.Allocated && toBeCreated.getPoolId() != null) { toBeCreated.setPoolId(null); if (!_volsDao.update(toBeCreated.getId(), toBeCreated)) { - throw new CloudRuntimeException(String.format("Error updating volume [%s] to clear pool Id.", toBeCreated.getId())); + throw new CloudRuntimeException(String.format("Error updating volume [%s] to clear pool Id.", toBeCreated)); } if (logger.isDebugEnabled()) { - String msg = String.format("Setting pool_id to NULL for volume id=%s as it is in Allocated state", toBeCreated.getId()); - logger.debug(msg); + logger.debug("Setting pool_id to NULL for volume id={} as it is in Allocated state", toBeCreated); } } // If the plan specifies a poolId, it means that this VM's ROOT @@ -1773,8 +1766,8 @@ protected Pair>, List> findSuitablePoolsFo } if (!isRootAdmin(vmProfile) && !isEnabledForAllocation(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId())) { - logger.debug(String.format("Cannot find new storage pool to deploy volume [{}] of VM [{}] in cluster [{}] because allocation state is disabled. Returning.", - toBeCreated.getUuid(), vmProfile.getUuid(), plan.getClusterId())); + logger.debug("Cannot find new storage pool to deploy volume [{}] of VM [{}] in cluster [{}] because allocation state is disabled. 
Returning.", + toBeCreated, vmProfile, plan.getClusterId()); suitableVolumeStoragePools.clear(); break; } @@ -1795,7 +1788,7 @@ protected Pair>, List> findSuitablePoolsFo useLocalStorage = diskOffering.isUseLocalStorage(); } diskProfile.setUseLocalStorage(useLocalStorage); - logger.debug("Calling StoragePoolAllocators to find suitable pools to allocate volume [{}] necessary to deploy VM [{}].", toBeCreated.getUuid(), vmProfile.getUuid()); + logger.debug("Calling StoragePoolAllocators to find suitable pools to allocate volume [{}] necessary to deploy VM [{}].", toBeCreated, vmProfile); boolean foundPotentialPools = tryToFindPotentialPoolsToAlocateVolume(vmProfile, plan, avoid, returnUpTo, suitableVolumeStoragePools, toBeCreated, diskProfile); if (avoid.getPoolsToAvoid() != null) { @@ -1804,7 +1797,7 @@ protected Pair>, List> findSuitablePoolsFo } if (!foundPotentialPools) { - logger.debug(String.format("No suitable pools found for volume [{}] used by VM [{}] under cluster: [{}].", toBeCreated.getUuid(), vmProfile.getUuid(), plan.getClusterId())); + logger.debug("No suitable pools found for volume [{}] used by VM [{}] under cluster: [{}].", toBeCreated, vmProfile, plan.getClusterId()); // No suitable storage pools found under this cluster for this // volume. - remove any suitable pools found for other volumes. 
// All volumes should get suitable pools under this cluster; @@ -1837,12 +1830,12 @@ private boolean tryToFindPotentialPoolsToAlocateVolume(VirtualMachineProfile vmP Map> suitableVolumeStoragePools, VolumeVO toBeCreated, DiskProfile diskProfile) { for (StoragePoolAllocator allocator : _storagePoolAllocators) { logger.debug("Trying to find suitable pools to allocate volume [{}] necessary to deploy VM [{}], using StoragePoolAllocator: [{}].", - toBeCreated.getUuid(), vmProfile.getUuid(), allocator.getClass().getSimpleName()); + toBeCreated, vmProfile, allocator.getClass().getSimpleName()); final List suitablePools = allocator.allocateToPool(diskProfile, vmProfile, plan, avoid, returnUpTo); if (suitablePools != null && !suitablePools.isEmpty()) { logger.debug("StoragePoolAllocator [{}] found {} suitable pools to allocate volume [{}] necessary to deploy VM [{}].", - allocator.getClass().getSimpleName(), suitablePools.size(), toBeCreated.getUuid(), vmProfile.getUuid()); + allocator.getClass().getSimpleName(), suitablePools.size(), toBeCreated, vmProfile); checkForPreferredStoragePool(suitablePools, vmProfile.getVirtualMachine(), suitableVolumeStoragePools, toBeCreated); return true; } @@ -1853,7 +1846,7 @@ private boolean tryToFindPotentialPoolsToAlocateVolume(VirtualMachineProfile vmP private boolean checkIfPoolCanBeReused(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, Map> suitableVolumeStoragePools, List readyAndReusedVolumes, VolumeVO toBeCreated) { - logger.debug("Volume [{}] of VM [{}] has pool [{}] already specified. Checking if this pool can be reused.", toBeCreated.getUuid(), vmProfile.getUuid(), toBeCreated.getPoolId()); + logger.debug("Volume [{}] of VM [{}] has pool [{}] already specified. 
Checking if this pool can be reused.", toBeCreated, vmProfile, toBeCreated.getPoolId()); List suitablePools = new ArrayList<>(); StoragePool pool = null; if (toBeCreated.getPoolId() != null) { @@ -1867,11 +1860,11 @@ private boolean checkIfPoolCanBeReused(VirtualMachineProfile vmProfile, Deployme return canReusePool(vmProfile, plan, suitableVolumeStoragePools, readyAndReusedVolumes, toBeCreated, suitablePools, pool); } else { logger.debug("Pool [{}] of volume [{}] used by VM [{}] is in the avoid set. Need to reallocate a pool for this volume.", - pool.getUuid(), toBeCreated.getUuid(), vmProfile.getUuid()); + pool, toBeCreated, vmProfile); } } else { logger.debug("Pool [{}] of volume [{}] used by VM [{}] is in maintenance. Need to reallocate a pool for this volume.", - pool.getUuid(), toBeCreated.getUuid(), vmProfile.getUuid()); + pool, toBeCreated, vmProfile); } return false; } @@ -1888,7 +1881,7 @@ private boolean canReusePool(VirtualMachineProfile vmProfile, DeploymentPlan pla if (plan.getDataCenterId() == exstPoolDcId && ((plan.getPodId() == exstPoolPodId && plan.getClusterId() == exstPoolClusterId) || (dataStore != null && dataStore.getScope() != null && dataStore.getScope().getScopeType() == ScopeType.ZONE))) { logger.debug("Pool [{}] of volume [{}] used by VM [{}] fits the specified plan. No need to reallocate a pool for this volume.", - pool.getUuid(), toBeCreated.getUuid(), vmProfile.getUuid()); + pool, toBeCreated, vmProfile); suitablePools.add(pool); suitableVolumeStoragePools.put(toBeCreated, suitablePools); if (!(toBeCreated.getState() == Volume.State.Allocated || toBeCreated.getState() == Volume.State.Creating)) { @@ -1898,7 +1891,7 @@ private boolean canReusePool(VirtualMachineProfile vmProfile, DeploymentPlan pla } logger.debug("Pool [{}] of volume [{}] used by VM [{}] does not fit the specified plan. 
Need to reallocate a pool for this volume.", - pool.getUuid(), toBeCreated.getUuid(), vmProfile.getUuid()); + pool, toBeCreated, vmProfile); return false; } @@ -1928,13 +1921,11 @@ private Optional getPreferredStoragePool(List poolList Optional storagePool = getMatchingStoragePool(accountStoragePoolUuid, poolList); if (storagePool.isPresent()) { - logger.debug("A storage pool is specified for this account, so we will use this storage pool for allocation: " - + storagePool.get().getUuid()); + logger.debug("A storage pool is specified for this account, so we will use this storage pool for allocation: {}", storagePool.get()); } else { String globalStoragePoolUuid = StorageManager.PreferredStoragePool.value(); storagePool = getMatchingStoragePool(globalStoragePoolUuid, poolList); - storagePool.ifPresent(pool -> logger.debug("A storage pool is specified in global setting, so we will use this storage pool for allocation: " - + pool.getUuid())); + storagePool.ifPresent(pool -> logger.debug("A storage pool is specified in global setting, so we will use this storage pool for allocation: {}", pool)); } return storagePool; } @@ -1943,19 +1934,19 @@ private boolean isEnabledForAllocation(long zoneId, Long podId, Long clusterId) // Check if the zone exists in the system DataCenterVO zone = _dcDao.findById(zoneId); if (zone != null && Grouping.AllocationState.Disabled == zone.getAllocationState()) { - logger.info("Zone is currently disabled, cannot allocate to this zone: " + zoneId); + logger.info("Zone is currently disabled, cannot allocate to this zone: {}", zone); return false; } Pod pod = _podDao.findById(podId); if (pod != null && Grouping.AllocationState.Disabled == pod.getAllocationState()) { - logger.info("Pod is currently disabled, cannot allocate to this pod: " + podId); + logger.info("Pod is currently disabled, cannot allocate to this pod: {}", pod); return false; } Cluster cluster = _clusterDao.findById(clusterId); if (cluster != null && 
Grouping.AllocationState.Disabled == cluster.getAllocationState()) { - logger.info("Cluster is currently disabled, cannot allocate to this cluster: " + clusterId); + logger.info("Cluster is currently disabled, cannot allocate to this cluster: {}", cluster); return false; } diff --git a/server/src/main/java/com/cloud/network/NetworkUsageManagerImpl.java b/server/src/main/java/com/cloud/network/NetworkUsageManagerImpl.java index 59e21dc9c770..04f7c0e72564 100644 --- a/server/src/main/java/com/cloud/network/NetworkUsageManagerImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkUsageManagerImpl.java @@ -132,16 +132,13 @@ public Host addTrafficMonitor(AddTrafficMonitorCmd cmd) { long zoneId = cmd.getZoneId(); DataCenterVO zone = _dcDao.findById(zoneId); - String zoneName; if (zone == null) { throw new InvalidParameterValueException("Could not find zone with ID: " + zoneId); - } else { - zoneName = zone.getName(); } List trafficMonitorsInZone = _resourceMgr.listAllHostsInOneZoneByType(Host.Type.TrafficMonitor, zoneId); if (trafficMonitorsInZone.size() != 0) { - throw new InvalidParameterValueException("Already added an traffic monitor in zone: " + zoneName); + throw new InvalidParameterValueException(String.format("Already added a traffic monitor in zone: %s", zone)); } URI uri; @@ -274,7 +271,7 @@ public boolean processAnswers(long agentId, long seq, Answer[] answers) { HostVO host = _hostDao.findById(agentId); if (host != null) { if ((host.getManagementServerId() == null) || (mgmtSrvrId != host.getManagementServerId())) { - logger.warn("Not the owner. Not collecting Direct Network usage from TrafficMonitor : " + agentId); + logger.warn("Not the owner. 
Not collecting Direct Network usage from TrafficMonitor : {}", host); return false; } } else { @@ -303,7 +300,7 @@ private boolean collectDirectNetworkUsage(final HostVO host) { final long zoneId = host.getDataCenterId(); final DetailVO lastCollectDetail = _detailsDao.findDetail(host.getId(), "last_collection"); if (lastCollectDetail == null) { - logger.warn("Last collection time not available. Skipping direct usage collection for Traffic Monitor: " + host.getId()); + logger.warn("Last collection time not available. Skipping direct usage collection for Traffic Monitor: {}", host); return false; } Date lastCollection = new Date(Long.parseLong(lastCollectDetail.getValue())); @@ -377,7 +374,7 @@ private boolean collectDirectNetworkUsage(final HostVO host) { DirectNetworkUsageAnswer answer = (DirectNetworkUsageAnswer)_agentMgr.easySend(host.getId(), cmd); if (answer == null || !answer.getResult()) { String details = (answer != null) ? answer.getDetails() : "details unavailable"; - String msg = "Unable to get network usage stats from " + host.getId() + " due to: " + details + "."; + String msg = String.format("Unable to get network usage stats from %s due to: %s.", host, details); logger.error(msg); return false; } else { @@ -410,7 +407,7 @@ private boolean collectDirectNetworkUsage(final HostVO host) { DirectNetworkUsageAnswer answer = (DirectNetworkUsageAnswer)_agentMgr.easySend(host.getId(), cmd); if (answer == null || !answer.getResult()) { String details = (answer != null) ? 
answer.getDetails() : "details unavailable"; - String msg = "Unable to get network usage stats from " + host.getId() + " due to: " + details + "."; + String msg = String.format("Unable to get network usage stats from %s due to: %s.", host, details); logger.error(msg); return false; } else { @@ -488,13 +485,12 @@ public void processHostAdded(long hostId) { @Override public void processConnect(Host agent, StartupCommand cmd, boolean forRebalance) { if (cmd instanceof StartupTrafficMonitorCommand) { - long agentId = agent.getId(); - logger.debug("Sending RecurringNetworkUsageCommand to " + agentId); + logger.debug("Sending RecurringNetworkUsageCommand to {}", agent); RecurringNetworkUsageCommand watch = new RecurringNetworkUsageCommand(_interval); try { - _agentMgr.send(agentId, new Commands(watch), this); + _agentMgr.send(agent.getId(), new Commands(watch), this); } catch (AgentUnavailableException e) { - logger.debug("Can not process connect for host " + agentId, e); + logger.debug("Can not process connect for host {}", agent, e); } } return; diff --git a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index 8a28eeabe21e..5f86c0e17210 100644 --- a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -394,7 +394,7 @@ public VirtualRouter upgradeRouter(final UpgradeRouterCmd cmd) { _accountMgr.checkAccess(caller, null, true, router); if (router.getServiceOfferingId() == serviceOfferingId) { - logger.debug("Router: " + routerId + "already has service offering: " + serviceOfferingId); + logger.debug("Router: {} already has service offering: {}", router, serviceOfferingId); return _routerDao.findById(routerId); } @@ -410,7 +410,7 @@ public VirtualRouter upgradeRouter(final UpgradeRouterCmd cmd) { // check if it is a 
system service offering, if yes return with error as // it cannot be used for user vms if (!newServiceOffering.isSystemUse()) { - throw new InvalidParameterValueException("Cannot upgrade router vm to a non system service offering " + serviceOfferingId); + throw new InvalidParameterValueException(String.format("Cannot upgrade router vm to a non system service offering %s", newServiceOffering)); } // Check that the router is stopped @@ -426,15 +426,16 @@ public VirtualRouter upgradeRouter(final UpgradeRouterCmd cmd) { // storage pool preference as the VM's current service // offering if (_itMgr.isRootVolumeOnLocalStorage(routerId) != newDiskOffering.isUseLocalStorage()) { - throw new InvalidParameterValueException("Can't upgrade, due to new local storage status : " + newDiskOffering.isUseLocalStorage() + " is different from " - + "current local storage status of router " + routerId); + throw new InvalidParameterValueException(String.format( + "Can't upgrade, due to new local storage status : %s is different from current local storage status of router %s", + newDiskOffering.isUseLocalStorage(), router)); } router.setServiceOfferingId(serviceOfferingId); if (_routerDao.update(routerId, router)) { return _routerDao.findById(routerId); } else { - throw new CloudRuntimeException("Unable to upgrade router " + routerId); + throw new CloudRuntimeException("Unable to upgrade router " + router); } } @@ -457,7 +458,7 @@ public VirtualRouter stopRouter(final long routerId, final boolean forced) throw final VirtualRouter virtualRouter = stop(router, forced, user, account); if (virtualRouter == null) { - throw new CloudRuntimeException("Failed to stop router with id " + routerId); + throw new CloudRuntimeException("Failed to stop router " + router); } // Clear stop pending flag after stopped successfully @@ -844,7 +845,7 @@ protected void updateSite2SiteVpnConnectionState(final List rout for (final Site2SiteVpnConnectionVO conn : conns) { final Site2SiteVpnConnectionVO lock = 
_s2sVpnConnectionDao.acquireInLockTable(conn.getId()); if (lock == null) { - throw new CloudRuntimeException("Unable to acquire lock for site to site vpn connection id " + conn.getId()); + throw new CloudRuntimeException(String.format("Unable to acquire lock for site to site vpn connection %s", conn)); } try { if (conn.getState() != Site2SiteVpnConnection.State.Connected && conn.getState() != Site2SiteVpnConnection.State.Disconnected && conn.getState() != Site2SiteVpnConnection.State.Connecting) { @@ -862,9 +863,9 @@ protected void updateSite2SiteVpnConnectionState(final List rout _s2sVpnConnectionDao.persist(conn); if (oldState != conn.getState()) { final String title = "Site-to-site Vpn Connection to " + gw.getName() + " just switched from " + oldState + " to " + conn.getState(); - final String context = - "Site-to-site Vpn Connection to " + gw.getName() + " on router " + router.getHostName() + "(id: " + router.getId() + ") " + - " just switched from " + oldState + " to " + conn.getState(); + final String context = String.format( + "Site-to-site Vpn Connection to %s on router %s(%s) just switched from %s to %s", + gw.getName(), router.getHostName(), router, oldState, conn.getState()); logger.info(context); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER, router.getDataCenterId(), router.getPodIdToDeployIn(), title, context); } @@ -924,8 +925,9 @@ protected void updateRoutersRedundantState(final List routers) { final RedundantState currState = router.getRedundantState(); if (prevState != currState) { final String title = "Redundant virtual router " + router.getInstanceName() + " just switch from " + prevState + " to " + currState; - final String context = "Redundant virtual router (name: " + router.getHostName() + ", id: " + router.getId() + ") " + " just switch from " + prevState + " to " - + currState; + final String context = String.format( + "Redundant virtual router [%s] with hostname: %s just switch from %s to %s", + router, 
router.getHostName(), prevState, currState); logger.info(context); if (currState == RedundantState.PRIMARY) { _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER, router.getDataCenterId(), router.getPodIdToDeployIn(), title, context); @@ -1032,8 +1034,11 @@ private void checkDuplicatePrimary(final List routers) { final DomainRouterVO dupRouter = networkRouterMaps.get(routerGuestNtwkId); final String title = "More than one redundant virtual router is in PRIMARY state! Router " + router.getHostName() + " and router " + dupRouter.getHostName(); - final String context = "Virtual router (name: " + router.getHostName() + ", id: " + router.getId() + " and router (name: " + dupRouter.getHostName() + ", id: " + router.getId() + ") are both in PRIMARY state! If the problem persist, restart both of routers. "; + final String context = String.format( + "Virtual router %s with hostname: %s and router %s " + + "with hostname %s are both in PRIMARY state! " + + "If the problem persists, restart both routers. ", + router, router.getHostName(), dupRouter, dupRouter.getHostName()); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER, router.getDataCenterId(), router.getPodIdToDeployIn(), title, context); logger.warn(context); } else { @@ -2667,7 +2672,7 @@ private void createDefaultEgressFirewallRule(final List rules, fin rules.add(rule); } else { - logger.debug("Egress policy for the Network " + networkId + " is already defined as Deny. So, no need to default the rule to Allow. "); + logger.debug("Egress policy for the Network {} is already defined as Deny. So, no need to default the rule to Allow. 
", network); } } @@ -2942,7 +2947,7 @@ public DomainRouterVO stop(final VirtualRouter router, final boolean forced, fin public boolean removeDhcpSupportForSubnet(final Network network, final List routers) throws ResourceUnavailableException { if (routers == null || routers.isEmpty()) { logger.warn("Failed to add/remove VPN users: no router found for account and zone"); - throw new ResourceUnavailableException("Unable to assign ip addresses, domR doesn't exist for network " + network.getId(), DataCenter.class, network.getDataCenterId()); + throw new ResourceUnavailableException(String.format("Unable to assign ip addresses, domR doesn't exist for network %s", network), DataCenter.class, network.getDataCenterId()); } for (final DomainRouterVO router : routers) { @@ -3044,7 +3049,7 @@ public VirtualRouter startRouter(final long routerId, final boolean reprogramNet } final VirtualRouter virtualRouter = _nwHelper.startVirtualRouter(router, user, caller, params); if (virtualRouter == null) { - throw new CloudRuntimeException("Failed to start router with id " + routerId); + throw new CloudRuntimeException(String.format("Failed to start router %s", router)); } return virtualRouter; } @@ -3334,7 +3339,7 @@ private List rebootRouters(final List routers) { final List jobIds = new ArrayList(); for (final DomainRouterVO router : routers) { if (!_nwHelper.checkRouterTemplateVersion(router)) { - logger.debug("Upgrading template for router: " + router.getId()); + logger.debug("Upgrading template for router: {}", router); final Map params = new HashMap(); params.put("ctxUserId", "1"); params.put("ctxAccountId", "" + router.getAccountId()); @@ -3349,7 +3354,7 @@ private List rebootRouters(final List routers) { final long jobId = _asyncMgr.submitAsyncJob(job); jobIds.add(jobId); } else { - logger.debug("Router: " + router.getId() + " is already at the latest version. No upgrade required"); + logger.debug("Router: {} is already at the latest version. 
No upgrade required", router); } } return jobIds; diff --git a/server/src/main/java/com/cloud/network/security/SecurityGroupListener.java b/server/src/main/java/com/cloud/network/security/SecurityGroupListener.java index b925137c4ce8..0e743b496f84 100644 --- a/server/src/main/java/com/cloud/network/security/SecurityGroupListener.java +++ b/server/src/main/java/com/cloud/network/security/SecurityGroupListener.java @@ -172,7 +172,7 @@ public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) logger.info("Scheduled network rules cleanup, interval=" + cleanupCmd.getInterval()); } catch (AgentUnavailableException e) { //usually hypervisors that do not understand sec group rules. - logger.debug("Unable to schedule network rules cleanup for host " + host.getId(), e); + logger.debug("Unable to schedule network rules cleanup for host {}", host, e); } if (_workTracker != null) { _workTracker.processConnect(host.getId()); diff --git a/server/src/main/java/com/cloud/storage/ImageStoreUploadMonitorImpl.java b/server/src/main/java/com/cloud/storage/ImageStoreUploadMonitorImpl.java index c356a62c6279..e0364681426d 100755 --- a/server/src/main/java/com/cloud/storage/ImageStoreUploadMonitorImpl.java +++ b/server/src/main/java/com/cloud/storage/ImageStoreUploadMonitorImpl.java @@ -219,7 +219,7 @@ protected void runInContext() { DataStore dataStore = storeMgr.getDataStore(volumeDataStore.getDataStoreId(), DataStoreRole.Image); EndPoint ep = _epSelector.select(dataStore, volumeDataStore.getExtractUrl()); if (ep == null) { - logger.warn("There is no secondary storage VM for image store " + dataStore.getName()); + logger.warn("There is no secondary storage VM for image store {}", dataStore); continue; } VolumeVO volume = _volumeDao.findById(volumeDataStore.getVolumeId()); @@ -235,7 +235,7 @@ protected void runInContext() { try { answer = ep.sendMessage(cmd); } catch (CloudRuntimeException e) { - logger.warn("Unable to get upload status for volume " + 
volume.getUuid() + ". Error details: " + e.getMessage()); + logger.warn("Unable to get upload status for volume {}. Error details: {}", volume, e.getMessage()); answer = new UploadStatusAnswer(cmd, UploadStatus.UNKNOWN, e.getMessage()); } if (answer == null || !(answer instanceof UploadStatusAnswer)) { @@ -263,7 +263,7 @@ protected void runInContext() { DataStore dataStore = storeMgr.getDataStore(templateDataStore.getDataStoreId(), DataStoreRole.Image); EndPoint ep = _epSelector.select(dataStore, templateDataStore.getExtractUrl()); if (ep == null) { - logger.warn("There is no secondary storage VM for image store " + dataStore.getName()); + logger.warn("There is no secondary storage VM for image store {}", dataStore); continue; } VMTemplateVO template = _templateDao.findById(templateDataStore.getTemplateId()); @@ -279,17 +279,19 @@ protected void runInContext() { try { answer = ep.sendMessage(cmd); } catch (CloudRuntimeException e) { - logger.warn("Unable to get upload status for template " + template.getUuid() + ". Error details: " + e.getMessage()); + logger.warn("Unable to get upload status for template {}. 
Error details: {}", template, e.getMessage()); answer = new UploadStatusAnswer(cmd, UploadStatus.UNKNOWN, e.getMessage()); } if (answer == null || !(answer instanceof UploadStatusAnswer)) { - logger.warn("No or invalid answer corresponding to UploadStatusCommand for template " + templateDataStore.getTemplateId()); + logger.warn("No or invalid answer corresponding to UploadStatusCommand for template {}", template); continue; } handleTemplateStatusResponse((UploadStatusAnswer)answer, template, templateDataStore); } } else { - String error = "Template " + template.getUuid() + " failed to upload as SSVM is either destroyed or SSVM agent not in 'Up' state"; + String error = String.format( + "Template %s failed to upload as SSVM is either destroyed or SSVM agent not in 'Up' state", + template); handleTemplateStatusResponse(new UploadStatusAnswer(cmd, UploadStatus.ERROR, error), template, templateDataStore); } } catch (Throwable th) { @@ -333,7 +335,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { Volume.class.getName(), tmpVolume.getUuid()); if (logger.isDebugEnabled()) { - logger.debug("Volume " + tmpVolume.getUuid() + " uploaded successfully"); + logger.debug("Volume {} uploaded successfully", tmpVolume); } break; case IN_PROGRESS: @@ -346,7 +348,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { tmpVolumeDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR); tmpVolumeDataStore.setState(State.Failed); stateMachine.transitTo(tmpVolume, Event.OperationFailed, null, _volumeDao); - msg = "Volume " + tmpVolume.getUuid() + " failed to upload due to operation timed out"; + msg = String.format("Volume %s failed to upload due to operation timed out", tmpVolume); logger.error(msg); sendAlert = true; } else { @@ -358,7 +360,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { tmpVolumeDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR); 
tmpVolumeDataStore.setState(State.Failed); stateMachine.transitTo(tmpVolume, Event.OperationFailed, null, _volumeDao); - msg = "Volume " + tmpVolume.getUuid() + " failed to upload. Error details: " + answer.getDetails(); + msg = String.format("Volume %s failed to upload. Error details: %s", tmpVolume, answer.getDetails()); logger.error(msg); sendAlert = true; break; @@ -368,7 +370,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { tmpVolumeDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.ABANDONED); tmpVolumeDataStore.setState(State.Failed); stateMachine.transitTo(tmpVolume, Event.OperationTimeout, null, _volumeDao); - msg = "Volume " + tmpVolume.getUuid() + " failed to upload due to operation timed out"; + msg = String.format("Volume %s failed to upload due to operation timed out", tmpVolume); logger.error(msg); sendAlert = true; } @@ -415,7 +417,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { logger.debug("Received OVF information from the uploaded template"); boolean persistDeployAsIs = deployAsIsHelper.persistTemplateOVFInformationAndUpdateGuestOS(tmpTemplate.getId(), ovfInformationTO, tmpTemplateDataStore); if (!persistDeployAsIs) { - logger.info("Failed persisting deploy-as-is template details for template " + template.getName()); + logger.info("Failed persisting deploy-as-is template details for template {}", template); break; } } @@ -428,7 +430,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { tmpTemplateDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.ABANDONED); tmpTemplateDataStore.setState(State.Failed); stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationFailed, null, _templateDao); - msg = "Multi-disk OVA template " + tmpTemplate.getUuid() + " failed to process data disks"; + msg = String.format("Multi-disk OVA template %s failed to process data disks", tmpTemplate); logger.error(msg); sendAlert = true; break; @@ -447,7 +449,7 
@@ public void doInTransactionWithoutResult(TransactionStatus status) { tmpTemplateDataStore.getPhysicalSize(), tmpTemplateDataStore.getSize(), VirtualMachineTemplate.class.getName(), tmpTemplate.getUuid()); if (logger.isDebugEnabled()) { - logger.debug("Template " + tmpTemplate.getUuid() + " uploaded successfully"); + logger.debug("Template {} uploaded successfully", tmpTemplate); } break; case IN_PROGRESS: @@ -460,7 +462,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { tmpTemplateDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR); tmpTemplateDataStore.setState(State.Failed); stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationFailed, null, _templateDao); - msg = "Template " + tmpTemplate.getUuid() + " failed to upload due to operation timed out"; + msg = String.format("Template %s failed to upload due to operation timed out", tmpTemplate); logger.error(msg); sendAlert = true; } else { @@ -472,7 +474,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { tmpTemplateDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR); tmpTemplateDataStore.setState(State.Failed); stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationFailed, null, _templateDao); - msg = "Template " + tmpTemplate.getUuid() + " failed to upload. Error details: " + answer.getDetails(); + msg = String.format("Template %s failed to upload. 
Error details: %s", tmpTemplate, answer.getDetails()); logger.error(msg); sendAlert = true; break; @@ -482,7 +484,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { tmpTemplateDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.ABANDONED); tmpTemplateDataStore.setState(State.Failed); stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationTimeout, null, _templateDao); - msg = "Template " + tmpTemplate.getUuid() + " failed to upload due to operation timed out"; + msg = String.format("Template %s failed to upload due to operation timed out", tmpTemplate); logger.error(msg); sendAlert = true; } diff --git a/server/src/main/java/com/cloud/storage/download/DownloadListener.java b/server/src/main/java/com/cloud/storage/download/DownloadListener.java index bd0c0eff1bce..6bb0dec26d8a 100644 --- a/server/src/main/java/com/cloud/storage/download/DownloadListener.java +++ b/server/src/main/java/com/cloud/storage/download/DownloadListener.java @@ -203,11 +203,13 @@ public void setDisconnected() { } public void logDisconnect() { - logger.warn("Unable to monitor download progress of " + object.getType() + ": " + object.getId() + " at host " + _ssAgent.getId()); + logger.warn("Unable to monitor download progress of {} : uuid: {}({}) at host {}", + object.getType(), object.getId(), object, _ssAgent.getId()); } public void log(String message, Level level) { - logger.log(level, message + ", " + object.getType() + ": " + object.getId() + " at host " + _ssAgent.getId()); + logger.log(level, "{}, {}: {}({}) at host {}", + message, object.getType(), object.getId(), object, _ssAgent.getId()); } public DownloadListener(DownloadMonitorImpl monitor) { diff --git a/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java b/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java index 78221d65d59c..62d4de8761dc 100644 --- a/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java +++ 
b/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java @@ -125,18 +125,18 @@ public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) } if (pool.getPoolType() == StoragePoolType.OCFS2 && !_ocfs2Mgr.prepareNodes(pool.getClusterId())) { - throw new ConnectionException(true, "Unable to prepare OCFS2 nodes for pool " + pool.getId()); + throw new ConnectionException(true, String.format("Unable to prepare OCFS2 nodes for pool %s", pool)); } Long hostId = host.getId(); if (logger.isDebugEnabled()) { - logger.debug("Host " + hostId + " connected, connecting host to shared pool id " + pool.getId() + " and sending storage pool information ..."); + logger.debug("Host {} connected, connecting host to shared pool {} and sending storage pool information ...", host, pool); } try { _storageManager.connectHostToSharedPool(hostId, pool.getId()); _storageManager.createCapacityEntry(pool.getId()); } catch (Exception e) { - throw new ConnectionException(true, "Unable to connect host " + hostId + " to storage pool id " + pool.getId() + " due to " + e.toString(), e); + throw new ConnectionException(true, String.format("Unable to connect host %s to storage pool %s due to %s", host, pool, e.toString()), e); } } } @@ -158,7 +158,7 @@ public synchronized boolean processDisconnect(long agentId, Status state) { List storagePoolHosts = _storageManager.findStoragePoolsConnectedToHost(host.getId()); if (storagePoolHosts == null) { if (logger.isTraceEnabled()) { - logger.trace("No pools to disconnect for host: " + host.getId()); + logger.trace("No pools to disconnect for host: {}", host); } return true; } @@ -182,7 +182,7 @@ public synchronized boolean processDisconnect(long agentId, Status state) { try { _storageManager.disconnectHostFromSharedPool(host.getId(), pool.getId()); } catch (Exception e) { - logger.error("Unable to disconnect host " + host.getId() + " from storage pool id " + pool.getId() + " due to " + e.toString()); + logger.error("Unable to 
disconnect host {} from storage pool {} due to {}", host, pool, e.toString()); disconnectResult = false; } } From 6b6ae03d5555bfff9a7357209d21a6cca920806c Mon Sep 17 00:00:00 2001 From: Vishesh Date: Wed, 13 Nov 2024 14:54:40 +0530 Subject: [PATCH 07/22] fixup --- .../java/com/cloud/agent/manager/DirectAgentAttache.java | 2 +- .../cloudstack/storage/image/store/TemplateObject.java | 2 +- .../apache/cloudstack/storage/snapshot/SnapshotObject.java | 2 +- .../storage/datastore/provider/DefaultHostListener.java | 2 +- .../org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java | 4 ++-- .../driver/CloudStackPrimaryDataStoreDriverImpl.java | 5 +---- .../datastore/driver/ScaleIOPrimaryDataStoreDriverTest.java | 1 - 7 files changed, 7 insertions(+), 11 deletions(-) diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java index 6f648f5dda06..b31348444fd0 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java @@ -116,7 +116,7 @@ public void process(Answer[] answers) { StartupAnswer startup = (StartupAnswer)answers[0]; int interval = startup.getPingInterval(); logger.info( - "StartupAnswer received [id: {} name: {} Interval: {}]", + "StartupAnswer received [id: {} name: {} interval: {}]", startup.getHostId(), startup.getHostName(), interval); _futures.add(_agentMgr.getCronJobPool().scheduleAtFixedRate(new PingTask(), interval, interval, TimeUnit.SECONDS)); } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java index 9eb087603911..c12cafad99d0 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java +++ 
b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java @@ -599,7 +599,7 @@ public boolean isFollowRedirects() { @Override public String toString() { - return "VolumeObject{" + + return "TemplateObject{" + "templateVO=" + getImage() + ", dataStore=" + getDataStore() + '}'; diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java index 5a3baceb7133..e71a01fb4c5c 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java @@ -469,7 +469,7 @@ public Class getEntityType() { @Override public String toString() { - return "VolumeObject{" + + return "SnapshotObject{" + "snapshotVO=" + getSnapshotVO() + ", dataStore=" + getDataStore() + '}'; diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java index 829f2c270f57..d734067384f8 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java @@ -149,7 +149,7 @@ public boolean hostConnect(long hostId, long poolId) throws StorageConflictExcep } assert (answer instanceof ModifyStoragePoolAnswer) : String.format( - "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? Pool=%sHost=%d", pool, hostId); + "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? 
Pool=%s Host=%d", pool, hostId); ModifyStoragePoolAnswer mspAnswer = (ModifyStoragePoolAnswer) answer; if (mspAnswer.getLocalDatastoreName() != null && pool.isShared()) { String datastoreName = mspAnswer.getLocalDatastoreName(); diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java index 0692726852a0..e794cef05803 100644 --- a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java @@ -155,7 +155,7 @@ private boolean isVMActivityOnHost(Host agent, DateTime suspectTime) throws HACh for (StoragePool pool : poolVolMap.keySet()) { activityStatus = verifyActivityOfStorageOnHost(poolVolMap, pool, agent, suspectTime, activityStatus); if (!activityStatus) { - logger.warn("It seems that the storage pool [{}] does not have activity on {}.", pool, agent.toString()); + logger.warn("It seems that the storage pool [{}] does not have activity on {}.", pool, agent); break; } } @@ -180,7 +180,7 @@ protected boolean verifyActivityOfStorageOnHost(HashMap callback) { - logger.debug( - "Copying volume [id: {}, uuid: {}, type:{}] to [id: {} uuid: {}, type: {}]", - srcdata.getId(), srcdata.getUuid(), srcdata.getType(), - destData.getId(), destData.getUuid(), destData.getType()); + logger.debug("Copying volume [{}] to [{}]", srcdata, destData); boolean encryptionRequired = anyVolumeRequiresEncryption(srcdata, destData); DataStore store = destData.getDataStore(); if (store.getRole() == DataStoreRole.Primary) { diff --git a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriverTest.java b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriverTest.java index 
50a8ebf8aab8..921dd3d4d9f9 100644 --- a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriverTest.java +++ b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriverTest.java @@ -500,7 +500,6 @@ public void deleteSourceVolumeFailureScenarioWhenNoSDCisFound() { String srcVolumePath = "bec0ba7700000007:vol-11-6aef-10ee"; DataStore srcStore = Mockito.mock(DataStore.class); - when(srcStore.getId()).thenReturn(1L); DataTO volumeTO = Mockito.mock(DataTO.class); when(srcData.getDataStore()).thenReturn(srcStore); when(srcData.getTO()).thenReturn(volumeTO); From 7e0d8484006ae4638e5f12dc5a535c487aaa8b03 Mon Sep 17 00:00:00 2001 From: Vishesh Date: Thu, 14 Nov 2024 18:22:30 +0530 Subject: [PATCH 08/22] Address comments --- .../src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java | 4 ++-- .../storage/datastore/manager/ScaleIOSDCManagerImpl.java | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 58e9d26339fb..28e6d796f87f 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -1321,7 +1321,7 @@ public void orchestrateStart(final String vmUuid, final Map details = new HashMap<>(); details.put(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID, systemId); PrepareStorageClientCommand cmd = new PrepareStorageClientCommand(((PrimaryDataStore) dataStore).getPoolType(), dataStore.getUuid(), details); From d28ffabf36f6ac99c1cd482ec1be00cb9efe3da4 Mon Sep 17 00:00:00 2001 From: Vishesh Date: Wed, 27 Nov 2024 16:25:23 +0530 Subject: [PATCH 09/22] Replace ids with objects or uuids --- .../com/cloud/agent/api/StartupAnswer.java | 8 +- 
.../subsystem/api/storage/EndPoint.java | 2 + .../main/java/com/cloud/agent/Listener.java | 8 + .../com/cloud/capacity/CapacityManager.java | 2 +- .../com/cloud/storage/StorageManager.java | 2 +- .../com/cloud/agent/manager/AgentAttache.java | 21 +- .../cloud/agent/manager/AgentManagerImpl.java | 90 ++++--- .../agent/manager/ClusteredAgentAttache.java | 8 +- .../manager/ClusteredAgentManagerImpl.java | 63 ++--- .../manager/ClusteredDirectAgentAttache.java | 4 +- .../agent/manager/ConnectedAgentAttache.java | 4 +- .../agent/manager/DirectAgentAttache.java | 30 ++- .../com/cloud/agent/manager/DummyAttache.java | 4 +- .../cloud/vm/VirtualMachineManagerImpl.java | 19 +- .../orchestration/NetworkOrchestrator.java | 39 +-- .../agent/manager/AgentManagerImplTest.java | 2 +- .../manager/ConnectedAgentAttacheTest.java | 16 +- .../agent/manager/DirectAgentAttacheTest.java | 6 +- .../cloud/offerings/NetworkOfferingVO.java | 5 +- .../storage/datastore/db/ObjectStoreVO.java | 8 + .../cloudstack/storage/LocalHostEndpoint.java | 5 + .../storage/RemoteHostEndPoint.java | 7 + .../BasePrimaryDataStoreLifeCycleImpl.java | 2 +- .../provider/DefaultHostListener.java | 10 +- ...BasePrimaryDataStoreLifeCycleImplTest.java | 2 +- .../com/cloud/cluster/ClusterManagerImpl.java | 6 +- .../com/cloud/utils/db/GenericDaoBase.java | 4 + .../baremetal/manager/BareMetalPlanner.java | 2 +- .../kvm/ha/KVMHostActivityChecker.java | 5 +- .../cluster/KubernetesClusterManagerImpl.java | 2 +- ...esClusterResourceModifierActionWorker.java | 2 +- .../AdaptiveDataStoreLifeCycleImpl.java | 4 +- .../ElastistorPrimaryDataStoreLifeCycle.java | 4 +- .../DateraPrimaryDataStoreLifeCycle.java | 4 +- ...oudStackPrimaryDataStoreLifeCycleImpl.java | 26 +- ...tackPrimaryDataStoreLifeCycleImplTest.java | 3 +- .../LinstorPrimaryDataStoreLifeCycleImpl.java | 4 +- .../NexentaPrimaryDataStoreLifeCycle.java | 2 +- .../ScaleIOPrimaryDataStoreLifeCycle.java | 4 +- .../provider/ScaleIOHostListener.java | 10 +- 
.../ScaleIOPrimaryDataStoreLifeCycleTest.java | 1 - .../SolidFirePrimaryDataStoreLifeCycle.java | 4 +- ...idFireSharedPrimaryDataStoreLifeCycle.java | 2 +- .../StorPoolPrimaryDataStoreLifeCycle.java | 2 +- .../cloud/capacity/CapacityManagerImpl.java | 181 +++++++------- .../deploy/DeploymentPlanningManagerImpl.java | 54 ++--- .../network/NetworkUsageManagerImpl.java | 16 +- .../VirtualNetworkApplianceManagerImpl.java | 71 +++--- .../security/SecurityGroupListener.java | 13 +- .../RollingMaintenanceManagerImpl.java | 2 +- .../storage/ImageStoreUploadMonitorImpl.java | 2 +- .../com/cloud/storage/StorageManagerImpl.java | 223 +++++++++--------- .../storage/download/DownloadListener.java | 8 +- .../storage/listener/StoragePoolMonitor.java | 9 +- .../java/com/cloud/vm/UserVmManagerImpl.java | 2 +- .../cloud/capacity/CapacityManagerTest.java | 4 +- .../DeploymentPlanningManagerImplTest.java | 4 +- .../listener/StoragePoolMonitorTest.java | 6 +- .../java/com/cloud/vm/UserVmManagerTest.java | 2 +- 59 files changed, 570 insertions(+), 485 deletions(-) diff --git a/core/src/main/java/com/cloud/agent/api/StartupAnswer.java b/core/src/main/java/com/cloud/agent/api/StartupAnswer.java index ebd44b2a76ad..c619ce75ace6 100644 --- a/core/src/main/java/com/cloud/agent/api/StartupAnswer.java +++ b/core/src/main/java/com/cloud/agent/api/StartupAnswer.java @@ -22,14 +22,16 @@ public class StartupAnswer extends Answer { long hostId; String hostName; + String hostUuid; int pingInterval; protected StartupAnswer() { } - public StartupAnswer(StartupCommand cmd, long hostId, String hostName, int pingInterval) { + public StartupAnswer(StartupCommand cmd, long hostId, String hostUuid, String hostName, int pingInterval) { super(cmd); this.hostId = hostId; + this.hostUuid = hostUuid; this.hostName = hostName; this.pingInterval = pingInterval; } @@ -42,6 +44,10 @@ public long getHostId() { return hostId; } + public String getHostUuid() { + return hostUuid; + } + public String getHostName() { 
return hostName; } diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/EndPoint.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/EndPoint.java index 254c91d35449..df78928ddc36 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/EndPoint.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/EndPoint.java @@ -24,6 +24,8 @@ public interface EndPoint { long getId(); + String getUuid(); + String getHostAddr(); String getPublicAddr(); diff --git a/engine/components-api/src/main/java/com/cloud/agent/Listener.java b/engine/components-api/src/main/java/com/cloud/agent/Listener.java index 843a634b4c00..ceba5f34b829 100644 --- a/engine/components-api/src/main/java/com/cloud/agent/Listener.java +++ b/engine/components-api/src/main/java/com/cloud/agent/Listener.java @@ -43,6 +43,10 @@ public interface Listener { */ boolean processAnswers(long agentId, long seq, Answer[] answers); + default boolean processAnswers(long agentId, String uuid, String name, long seq, Answer[] answers) { + return processAnswers(agentId, seq, answers); + } + /** * This method is called by the AgentManager when an agent sent * a command to the server. In order to process these commands, @@ -92,6 +96,10 @@ public interface Listener { */ boolean processDisconnect(long agentId, Status state); + default boolean processDisconnect(long agentId, String uuid, String name, Status state) { + return processDisconnect(agentId, state); + } + /** * This method is called by AgentManager when a host is about to be removed from a cluster. 
* @param long the ID of the host that's about to be removed diff --git a/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java b/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java index e1bb10f5d268..cbd137e86826 100644 --- a/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java +++ b/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java @@ -140,7 +140,7 @@ public interface CapacityManager { * @param ram required RAM * @param cpuOverprovisioningFactor factor to apply to the actual host cpu */ - boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOverprovisioningFactor, float memoryOvercommitRatio, + boolean checkIfHostHasCapacity(Host host, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOverprovisioningFactor, float memoryOvercommitRatio, boolean considerReservedCapacity); void updateCapacityForHost(Host host); diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index b51536688990..36780d4e260a 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -365,7 +365,7 @@ static Boolean getFullCloneConfiguration(Long storeId) { String getStoragePoolMountFailureReason(String error); - boolean connectHostToSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException; + boolean connectHostToSharedPool(Host host, long poolId) throws StorageUnavailableException, StorageConflictException; void disconnectHostFromSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException; diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java 
b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java index 173fd9fc704a..c88eeae70495 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java @@ -110,6 +110,7 @@ public int compare(final Object o1, final Object o2) { protected static String LOG_SEQ_FORMATTED_STRING; protected final long _id; + protected String _uuid; protected String _name = null; protected final ConcurrentHashMap _waitForList; protected final LinkedList _requests; @@ -133,8 +134,9 @@ public int compare(final Object o1, final Object o2) { Arrays.sort(s_commandsNotAllowedInConnectingMode); } - protected AgentAttache(final AgentManagerImpl agentMgr, final long id, final String name, final boolean maintenance) { + protected AgentAttache(final AgentManagerImpl agentMgr, final long id, final String uuid, final String name, final boolean maintenance) { _id = id; + _uuid = uuid; _name = name; _waitForList = new ConcurrentHashMap(); _currentSequence = null; @@ -145,6 +147,15 @@ protected AgentAttache(final AgentManagerImpl agentMgr, final long id, final Str LOG_SEQ_FORMATTED_STRING = String.format("Seq %d-{}: {}", _id); } + @Override + public String toString() { + return "AgentAttache{" + + "id=" + _id + + ", uuid='" + _uuid + '\'' + + ", name='" + _name + '\'' + + '}'; + } + public synchronized long getNextSequence() { return ++_nextSequence; } @@ -206,7 +217,7 @@ protected synchronized void cancel(final long seq) { logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Cancelling."); final Listener listener = _waitForList.remove(seq); if (listener != null) { - listener.processDisconnect(_id, Status.Disconnected); + listener.processDisconnect(_id, _uuid, _name, Status.Disconnected); } int index = findRequest(seq); if (index >= 0) { @@ -243,6 +254,10 @@ public long getId() { return _id; } + public String getUuid() { + return _uuid; + } + public String getName() { return 
_name; } @@ -316,7 +331,7 @@ protected void cancelAllCommands(final Status state, final boolean cancelActive) it.remove(); final Listener monitor = entry.getValue(); logger.debug(LOG_SEQ_FORMATTED_STRING, entry.getKey(), "Sending disconnect to " + monitor.getClass()); - monitor.processDisconnect(_id, state); + monitor.processDisconnect(_id, _uuid, _name, state); } } } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java index 4be53d9f19be..caefc81848e6 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java @@ -302,8 +302,7 @@ private AgentControlAnswer handleControlCommand(final AgentAttache attache, fina } } - logger.warn("No handling of agent control command: {} sent from [id: {} name: {}]", - cmd, attache.getId(), attache.getName()); + logger.warn("No handling of agent control command: {} sent from {}", cmd, attache); return new AgentControlAnswer(cmd); } @@ -513,7 +512,7 @@ public void removeAgent(final AgentAttache attache, final Status nextState) { return; } final long hostId = attache.getId(); - logger.debug("Remove Agent : [id: {}, name: {}]", hostId, attache.getName()); + logger.debug("Remove Agent : {}", attache); AgentAttache removed = null; boolean conflict = false; synchronized (_agents) { @@ -525,8 +524,7 @@ public void removeAgent(final AgentAttache attache, final Status nextState) { } } if (conflict) { - logger.debug("Agent for host [id: {}, name: {}] is created when it is being disconnected", - hostId, attache.getName()); + logger.debug("Agent for host {} is created when it is being disconnected", attache); } if (removed != null) { removed.disconnect(nextState); @@ -534,7 +532,7 @@ public void removeAgent(final AgentAttache attache, final Status nextState) { for (final Pair monitor : _hostMonitors) { 
logger.debug("Sending Disconnect to listener: {}", monitor.second().getClass().getName()); - monitor.second().processDisconnect(hostId, nextState); + monitor.second().processDisconnect(hostId, attache.getUuid(), attache.getName(), nextState); } } @@ -571,16 +569,12 @@ protected AgentAttache notifyMonitorsOfConnection(final AgentAttache attache, fi } } else if (e instanceof HypervisorVersionChangedException) { handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true); - throw new CloudRuntimeException(String.format( - "Unable to connect [id: %d, name: %s]", - attache.getId(), attache.getName()), e); + throw new CloudRuntimeException(String.format("Unable to connect %s", attache), e); } else { logger.error("Monitor {} says there is an error in the connect process for {} due to {}", monitor.second().getClass().getSimpleName(), host, e.getMessage(), e); handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true); - throw new CloudRuntimeException(String.format( - "Unable to connect [id: %d, name: %s]", - attache.getId(), attache.getName()), e); + throw new CloudRuntimeException(String.format("Unable to connect %s", attache), e); } } } @@ -745,14 +739,14 @@ protected boolean loadDirectlyConnectedHost(final HostVO host, final boolean for return h == null ? 
false : true; } else { - _executor.execute(new SimulateStartTask(host.getId(), resource, host.getDetails())); + _executor.execute(new SimulateStartTask(host.getId(), host.getUuid(), host.getName(), resource, host.getDetails())); return true; } } protected AgentAttache createAttacheForDirectConnect(final Host host, final ServerResource resource) throws ConnectionException { logger.debug("create DirectAgentAttache for {}", host); - final DirectAgentAttache attache = new DirectAgentAttache(this, host.getId(), host.getName(), resource, host.isInMaintenanceStates()); + final DirectAgentAttache attache = new DirectAgentAttache(this, host.getId(), host.getUuid(), host.getName(), resource, host.isInMaintenanceStates()); AgentAttache old = null; synchronized (_agents) { @@ -777,7 +771,7 @@ public boolean stop() { for (final AgentAttache agent : _agents.values()) { final HostVO host = _hostDao.findById(agent.getId()); if (host == null) { - logger.debug("Cannot find host [id: {}, name: {}]", agent.getId(), agent.getName()); + logger.debug("Cannot find host {}", agent); } else { if (!agent.forForward()) { agentStatusTransitTo(host, Event.ManagementServerDown, _nodeId); @@ -817,19 +811,18 @@ protected boolean handleDisconnectWithoutInvestigation(final AgentAttache attach GlobalLock joinLock = getHostJoinLock(hostId); if (joinLock.lock(60)) { try { - logger.info("Host [id: {}, name: {}] is disconnecting with event {}", - hostId, attache.getName(), event); + logger.info("Host {} is disconnecting with event {}", + attache, event); Status nextStatus = null; final HostVO host = _hostDao.findById(hostId); if (host == null) { - logger.warn("Can't find host with {} (name: {})", hostId, attache.getName()); + logger.warn("Can't find host with {} ({})", hostId, attache); nextStatus = Status.Removed; } else { nextStatus = getNextStatusOnDisconnection(host, event); caService.purgeHostCertificate(host); } - logger.debug("Deregistering link for [id: {}, name: {}] with state {}", - hostId, 
attache.getName(), nextStatus); + logger.debug("Deregistering link for {} with state {}", attache, nextStatus); removeAgent(attache, nextStatus); @@ -870,7 +863,7 @@ protected boolean handleDisconnectWithInvestigation(final AgentAttache attache, // if state cannot be determined do nothing and bail out if (determinedState == null) { if ((System.currentTimeMillis() >> 10) - host.getLastPinged() > AlertWait.value()) { - logger.warn("Agent {} state cannot be determined for more than {}({}) seconds, will go to Alert state", + logger.warn("Agent {} state cannot be determined for more than {} ({}) seconds, will go to Alert state", host, AlertWait, AlertWait.value()); determinedState = Status.Alert; } else { @@ -909,7 +902,7 @@ protected boolean handleDisconnectWithInvestigation(final AgentAttache attache, } else if (currentStatus == Status.Up) { final DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId()); final HostPodVO podVO = _podDao.findById(host.getPodId()); - final String hostDesc = "name: " + host.getName() + " (id:" + host.getId() + "), availability zone: " + dcVO.getName() + ", pod: " + podVO.getName(); + final String hostDesc = "name: " + host.getName() + " (id:" + host.getUuid() + "), availability zone: " + dcVO.getName() + ", pod: " + podVO.getName(); if (host.getType() != Host.Type.SecondaryStorage && host.getType() != Host.Type.ConsoleProxy) { _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "Host disconnected, " + hostDesc, "If the agent for host [" + hostDesc + "] is not restarted within " + AlertWait + " seconds, host will go to Alert state"); @@ -1064,9 +1057,9 @@ public void notifyMonitorsOfRemovedHost(long hostId, long clusterId) { public boolean executeUserRequest(final long hostId, final Event event) throws AgentUnavailableException { if (event == Event.AgentDisconnected) { - logger.debug("Received agent disconnect event for host {}", hostId); AgentAttache attache = null; attache = 
findAttache(hostId); + logger.debug("Received agent disconnect event for host {} ({})", hostId, attache); if (attache != null) { handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true); } @@ -1076,7 +1069,7 @@ try { reconnect(hostId); } catch (CloudRuntimeException e) { - logger.debug("Error on shutdown request for hostID: {}", hostId, e); + logger.debug("Error on shutdown request for hostID: {} ({})", hostId, findAttache(hostId), e); return false; } return true; @@ -1092,7 +1085,7 @@ public boolean isAgentAttached(final long hostId) { protected AgentAttache createAttacheForConnect(final HostVO host, final Link link) throws ConnectionException { logger.debug("create ConnectedAgentAttache for {}", host); - final AgentAttache attache = new ConnectedAgentAttache(this, host.getId(), host.getName(), link, host.isInMaintenanceStates()); + final AgentAttache attache = new ConnectedAgentAttache(this, host.getId(), host.getUuid(), host.getName(), link, host.isInMaintenanceStates()); link.attach(attache); AgentAttache old = null; @@ -1193,8 +1186,10 @@ protected class SimulateStartTask extends ManagedContextRunnable { ServerResource resource; Map details; long id; + String uuid; + String name; - public SimulateStartTask(final long id, final ServerResource resource, final Map details) { + public SimulateStartTask(final long id, String uuid, String name, final ServerResource resource, final Map details) { this.id = id; this.resource = resource; this.details = details; @@ -1203,26 +1198,26 @@ public SimulateStartTask(final long id, final ServerResource resource, final Map @Override protected void runInContext() { try { - logger.debug("Simulating start for resource {} id {}", resource.getName(), id); + logger.debug("Simulating start for resource {} (id: {}, uuid: {}, name: {})", resource.getName(), id, uuid, name); if (tapLoadingAgents(id, TapAgentsAction.Add)) { try {
final AgentAttache agentattache = findAttache(id); if (agentattache == null) { - logger.debug("Creating agent for host {}", id); + logger.debug("Creating agent for host [id: {}, uuid: {}, name: {}]", id, uuid, name); _resourceMgr.createHostAndAgent(id, resource, details, false, null, false); - logger.debug("Completed creating agent for host {}", id); + logger.debug("Completed creating agent for host [id: {}, uuid: {}, name: {}]", id, uuid, name); } else { - logger.debug("Agent already created in another thread for host {}, ignore this", id); + logger.debug("Agent already created in another thread for host [id: {}, uuid: {}, name: {}], ignore this", id, uuid, name); } } finally { tapLoadingAgents(id, TapAgentsAction.Del); } } else { - logger.debug("Agent creation already getting processed in another thread for host {}, ignore this", id); + logger.debug("Agent creation already getting processed in another thread for host [id: {}, uuid: {}, name: {}], ignore this", id, uuid, name); } } catch (final Exception e) { - logger.warn("Unable to simulate start on resource {} name {}", id, resource.getName(), e); + logger.warn("Unable to simulate start on resource [id: {}, uuid: {}, name: {}] name {}", id, uuid, name, resource.getName(), e); } } } @@ -1262,7 +1257,7 @@ protected void connectAgent(final Link link, final Command[] cmds, final Request cmd = cmds[i]; if (cmd instanceof StartupRoutingCommand || cmd instanceof StartupProxyCommand || cmd instanceof StartupSecondaryStorageCommand || cmd instanceof StartupStorageCommand) { - answers[i] = new StartupAnswer((StartupCommand) cmds[i], 0, "", mgmtServiceConf.getPingInterval()); + answers[i] = new StartupAnswer((StartupCommand) cmds[i], 0, "", "", mgmtServiceConf.getPingInterval()); break; } } @@ -1352,11 +1347,11 @@ protected void processRequest(final Link link, final Request request) { if (logger.isDebugEnabled()) { if (cmd instanceof PingRoutingCommand) { logD = false; - logger.debug("Ping from Routing host {}({})",
hostId, hostName); + logger.debug("Ping from Routing host {}", attache); logger.trace("SeqA {}-{}: Processing {}", hostId, request.getSequence(), request); } else if (cmd instanceof PingCommand) { logD = false; - logger.debug("Ping from {}({})", hostId, hostName); + logger.debug("Ping from {}", attache); logger.trace("SeqA {}-{}: Processing {}", hostId, request.getSequence(), request); } else { logger.debug("SeqA {}-{}: {}", hostId, request.getSequence(), request); @@ -1371,22 +1366,20 @@ protected void processRequest(final Link link, final Request request) { if (cmd instanceof StartupRoutingCommand) { final StartupRoutingCommand startup = (StartupRoutingCommand) cmd; processStartupRoutingCommand(startup, hostId); - answer = new StartupAnswer(startup, attache.getId(), attache.getName(), mgmtServiceConf.getPingInterval()); + answer = new StartupAnswer(startup, attache.getId(), attache.getUuid(), attache.getName(), mgmtServiceConf.getPingInterval()); } else if (cmd instanceof StartupProxyCommand) { final StartupProxyCommand startup = (StartupProxyCommand) cmd; - answer = new StartupAnswer(startup, attache.getId(), attache.getName(), mgmtServiceConf.getPingInterval()); + answer = new StartupAnswer(startup, attache.getId(), attache.getUuid(), attache.getName(), mgmtServiceConf.getPingInterval()); } else if (cmd instanceof StartupSecondaryStorageCommand) { final StartupSecondaryStorageCommand startup = (StartupSecondaryStorageCommand) cmd; - answer = new StartupAnswer(startup, attache.getId(), attache.getName(), mgmtServiceConf.getPingInterval()); + answer = new StartupAnswer(startup, attache.getId(), attache.getUuid(), attache.getName(), mgmtServiceConf.getPingInterval()); } else if (cmd instanceof StartupStorageCommand) { final StartupStorageCommand startup = (StartupStorageCommand) cmd; - answer = new StartupAnswer(startup, attache.getId(), attache.getName(), mgmtServiceConf.getPingInterval()); + answer = new StartupAnswer(startup, attache.getId(), attache.getUuid(), 
attache.getName(), mgmtServiceConf.getPingInterval()); } else if (cmd instanceof ShutdownCommand) { final ShutdownCommand shutdown = (ShutdownCommand)cmd; final String reason = shutdown.getReason(); - logger.info( - "Host [id: {}, name: {}] has informed us that it is shutting down with reason {} and detail {}", - attache.getId(), attache.getName(), reason, shutdown.getDetail()); + logger.info("Host {} has informed us that it is shutting down with reason {} and detail {}", attache, reason, shutdown.getDetail()); if (reason.equals(ShutdownCommand.Update)) { // disconnectWithoutInvestigation(attache, Event.UpdateNeeded); throw new CloudRuntimeException("Agent update not implemented"); @@ -1434,7 +1427,7 @@ protected void processRequest(final Link link, final Request request) { } else if (cmd instanceof ReadyAnswer) { final HostVO host = _hostDao.findById(attache.getId()); if (host == null) { - logger.debug("Cant not find host id: {}(name: {})", attache.getId(), attache.getName()); + logger.debug("Cannot find host with id: {} ({})", attache.getId(), attache); } answer = new Answer(cmd); } else { @@ -1466,8 +1459,8 @@ protected void processResponse(final Link link, final Response response) { if (attache == null) { logger.warn("Unable to process: {}", response); } else if (!attache.processAnswers(response.getSequence(), response)) { - logger.info("Host [id: {}, name: {}] - Seq {}: Response is not processed: {}", - attache.getId(), attache.getName(), response.getSequence(), response); + logger.info("Host {} - Seq {}: Response is not processed: {}", attache, response.getSequence(), response); } } @@ -1627,7 +1619,7 @@ public boolean handleDirectConnectAgent(final Host host, final StartupCommand[] attache = createAttacheForDirectConnect(host, resource); final StartupAnswer[] answers = new StartupAnswer[cmds.length]; for (int i = 0; i < answers.length; i++) { - answers[i] = new StartupAnswer(cmds[i], attache.getId(), attache.getName(), mgmtServiceConf.getPingInterval()); +
answers[i] = new StartupAnswer(cmds[i], attache.getId(), attache.getUuid(), attache.getName(), mgmtServiceConf.getPingInterval()); } attache.process(answers); @@ -1677,7 +1669,7 @@ public Long getAgentPingTime(final long agentId) { public void pingBy(final long agentId) { // Update PingMap with the latest time if agent entry exists in the PingMap if (_pingMap.replace(agentId, InaccurateClock.getTimeInSeconds()) == null) { - logger.info("PingMap for agent: " + agentId + " will not be updated because agent is no longer in the PingMap"); + logger.info("PingMap for agent: {} ({}) will not be updated because agent is no longer in the PingMap", agentId, findAttache(agentId)); } } @@ -1705,7 +1697,7 @@ protected void runInContext() { if (host != null && (host.getType() == Host.Type.ConsoleProxy || host.getType() == Host.Type.SecondaryStorageVM || host.getType() == Host.Type.SecondaryStorageCmdExecutor)) { - logger.warn("Disconnect agent for CPVM/SSVM due to physical connection close. host: {}", host.getId()); + logger.warn("Disconnect agent for CPVM/SSVM due to physical connection close. 
host: {}", host); disconnectWithoutInvestigation(agentId, Event.ShutdownRequested); } else { logger.debug("Ping timeout for agent {}, do investigation", h); @@ -1930,7 +1922,7 @@ private void sendCommandToAgents(Map> hostsPerZone, Map(); } - public ClusteredAgentAttache(final AgentManagerImpl agentMgr, final long id, final String name, final Link link, final boolean maintenance) { - super(agentMgr, id, name, link, maintenance); + public ClusteredAgentAttache(final AgentManagerImpl agentMgr, final long id, final String uuid, final String name, final Link link, final boolean maintenance) { + super(agentMgr, id, uuid, name, link, maintenance); _forward = link == null; _transferRequests = new LinkedList(); } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java index a85c7652d594..be327418205b 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java @@ -247,7 +247,7 @@ public Task create(final Task.Type type, final Link link, final byte[] data) { protected AgentAttache createAttache(final HostVO host) { logger.debug("create forwarding ClusteredAgentAttache for {}", host); long id = host.getId(); - final AgentAttache attache = new ClusteredAgentAttache(this, id, host.getName()); + final AgentAttache attache = new ClusteredAgentAttache(this, id, host.getUuid(), host.getName()); AgentAttache old = null; synchronized (_agents) { old = _agents.get(id); @@ -263,7 +263,7 @@ protected AgentAttache createAttache(final HostVO host) { @Override protected AgentAttache createAttacheForConnect(final HostVO host, final Link link) { logger.debug("create ClusteredAgentAttache for {}", host); - final AgentAttache attache = new ClusteredAgentAttache(this, host.getId(), host.getName(), link, 
host.isInMaintenanceStates()); + final AgentAttache attache = new ClusteredAgentAttache(this, host.getId(), host.getUuid(), host.getName(), link, host.isInMaintenanceStates()); link.attach(attache); AgentAttache old = null; synchronized (_agents) { @@ -279,7 +279,7 @@ protected AgentAttache createAttacheForConnect(final HostVO host, final Link lin @Override protected AgentAttache createAttacheForDirectConnect(final Host host, final ServerResource resource) { logger.debug("Create ClusteredDirectAgentAttache for {}.", host); - final DirectAgentAttache attache = new ClusteredDirectAgentAttache(this, host.getId(), host.getName(), _nodeId, resource, host.isInMaintenanceStates()); + final DirectAgentAttache attache = new ClusteredDirectAgentAttache(this, host.getId(), host.getUuid(), host.getName(), _nodeId, resource, host.isInMaintenanceStates()); AgentAttache old = null; synchronized (_agents) { old = _agents.get(host.getId()); @@ -322,8 +322,8 @@ protected boolean handleDisconnect(final AgentAttache agent, final Status.Event @Override public boolean executeUserRequest(final long hostId, final Event event) throws AgentUnavailableException { if (event == Event.AgentDisconnected) { - logger.debug("Received agent disconnect event for host {}", hostId); final AgentAttache attache = findAttache(hostId); + logger.debug("Received agent disconnect event for host {} ({})", hostId, attache); if (attache != null) { // don't process disconnect if the host is being rebalanced if (isAgentRebalanceEnabled()) { @@ -331,8 +331,8 @@ public boolean executeUserRequest(final long hostId, final Event event) throws A if (transferVO != null) { if (transferVO.getFutureOwner() == _nodeId && transferVO.getState() == HostTransferState.TransferStarted) { logger.debug( - "Not processing {} event for the host [id: {}, name: {}] as the host is being connected to {}", - Event.AgentDisconnected, hostId, attache.getName(), _nodeId); + "Not processing {} event for the host [id: {}, uuid: {}, name: {}] as 
the host is being connected to {}", + Event.AgentDisconnected, hostId, attache.getUuid(), attache.getName(), _nodeId); return true; } } @@ -342,8 +342,8 @@ public boolean executeUserRequest(final long hostId, final Event event) throws A // but the host has already reconnected to the current management server if (!attache.forForward()) { logger.debug( - "Not processing {} event for the host [id: {}, name: {}] as the host is directly connected to the current management server {}", - Event.AgentDisconnected, hostId, attache.getName(), _nodeId); + "Not processing {} event for the host [id: {}, uuid: {}, name: {}] as the host is directly connected to the current management server {}", + Event.AgentDisconnected, hostId, attache.getUuid(), attache.getName(), _nodeId); return true; } @@ -749,7 +749,7 @@ public boolean executeRebalanceRequest(final long agentId, final long currentOwn try { result = rebalanceHost(agentId, currentOwnerId, futureOwnerId); } catch (final Exception e) { - logger.warn("Unable to rebalance host id={}", agentId, e); + logger.warn("Unable to rebalance host id={} ({})", agentId, findAttache(agentId), e); } } return result; @@ -836,7 +836,7 @@ public void startRebalanceAgents() { boolean result = true; if (_hostTransferDao.findById(hostId) != null) { - logger.warn("Somebody else is already rebalancing host id: {}", host); + logger.warn("Somebody else is already rebalancing host: {}", host); continue; } @@ -909,7 +909,7 @@ public Boolean propagateAgentEvent(final long agentId, final Event event) throws return null; } - logger.debug("Propagating agent change request event: {} to agent: {}", event.toString(), agentId); + logger.debug("Propagating agent change request event: {} to agent: {} ({})", event.toString(), agentId, findAttache(agentId)); final Command[] cmds = new Command[1]; cmds[0] = new ChangeAgentCommand(agentId, event); @@ -949,14 +949,14 @@ protected void runInContext() { final HostTransferMapVO transferMap = 
_hostTransferDao.findActiveHostTransferMapByHostId(hostId, new Date(cutTime.getTime() - rebalanceTimeOut)); if (transferMap == null) { - logger.debug("Timed out waiting for the host id={} to be ready to transfer, skipping rebalance for the host" + hostId); + logger.debug("Timed out waiting for the host id={} ({}) to be ready to transfer, skipping rebalance for the host", hostId, attache); iterator.remove(); _hostTransferDao.completeAgentTransfer(hostId); continue; } if (transferMap.getInitialOwner() != _nodeId || attache == null || attache.forForward()) { - logger.debug("Management server {} doesn't own host id={} any more, skipping rebalance for the host", _nodeId, hostId); + logger.debug("Management server {} doesn't own host id={} ({}) any more, skipping rebalance for the host", _nodeId, hostId, attache); iterator.remove(); _hostTransferDao.completeAgentTransfer(hostId); continue; @@ -964,7 +964,7 @@ protected void runInContext() { final ManagementServerHostVO ms = _mshostDao.findByMsid(transferMap.getFutureOwner()); if (ms != null && ms.getState() != ManagementServerHost.State.Up) { - logger.debug("Can't transfer host {} as it's future owner is not in UP state: {}, skipping rebalance for the host", hostId, ms); + logger.debug("Can't transfer host {} ({}) as its future owner is not in UP state: {}, skipping rebalance for the host", hostId, attache, ms); iterator.remove(); _hostTransferDao.completeAgentTransfer(hostId); continue; @@ -975,13 +975,13 @@ protected void runInContext() { try { _executor.execute(new RebalanceTask(hostId, transferMap.getInitialOwner(), transferMap.getFutureOwner())); } catch (final RejectedExecutionException ex) { - logger.warn("Failed to submit rebalance task for host id={}; postponing the execution", hostId); + logger.warn("Failed to submit rebalance task for host id={} ({}); postponing the execution", hostId, attache); continue; } } else { - logger.debug("Agent {} can't be transferred yet as its request queue size is 
{} and listener queue size is {}", - hostId, attache.getQueueSize(), attache.getNonRecurringListenersSize()); + logger.debug("Agent {} ({}) can't be transferred yet as its request queue size is {} and listener queue size is {}", + hostId, attache, attache.getQueueSize(), attache.getNonRecurringListenersSize()); } } } else { @@ -997,7 +997,7 @@ protected void runInContext() { } private boolean setToWaitForRebalance(final long hostId, final long currentOwnerId, final long futureOwnerId) { - logger.debug("Adding agent {} to the list of agents to transfer", hostId); + logger.debug("Adding agent {} ({}) to the list of agents to transfer", hostId, findAttache(hostId)); synchronized (_agentToTransferIds) { return _agentToTransferIds.add(hostId); } @@ -1019,7 +1019,7 @@ protected boolean rebalanceHost(final long hostId, final long currentOwnerId, fi } } catch (final Exception ex) { - logger.warn("Host {} failed to connect to the management server {} as a part of rebalance process", hostId, futureOwnerId, ex); + logger.warn("Host {} ({}) failed to connect to the management server {} as a part of rebalance process", hostId, findAttache(hostId), futureOwnerId, ex); result = false; } @@ -1066,9 +1066,10 @@ protected boolean rebalanceHost(final long hostId, final long currentOwnerId, fi protected void finishRebalance(final long hostId, final long futureOwnerId, final Event event) { final boolean success = event == Event.RebalanceCompleted ? 
true : false; - logger.debug("Finishing rebalancing for the agent {} with event {}", hostId, event); final AgentAttache attache = findAttache(hostId); + logger.debug("Finishing rebalancing for the agent {} ({}) with event {}", hostId, attache, event); + if (attache == null || !(attache instanceof ClusteredAgentAttache)) { logger.debug("Unable to find forward attache for the host id={} assuming that the agent disconnected already", hostId); _hostTransferDao.completeAgentTransfer(hostId); @@ -1085,7 +1086,8 @@ protected void finishRebalance(final long hostId, final long futureOwnerId, fina // 2) Get all transfer requests and route them to peer Request requestToTransfer = forwardAttache.getRequestToTransfer(); while (requestToTransfer != null) { - logger.debug("Forwarding request {} held in transfer attache {} from the management server {} to {}", requestToTransfer.getSequence(), hostId, _nodeId, futureOwnerId); + logger.debug("Forwarding request {} held in transfer attache [id: {}, uuid: {}, name: {}] from the management server {} to {}", + requestToTransfer.getSequence(), hostId, attache.getUuid(), attache.getName(), _nodeId, futureOwnerId); final boolean routeResult = routeToPeer(Long.toString(futureOwnerId), requestToTransfer.getBytes()); if (!routeResult) { logD(requestToTransfer.getBytes(), "Failed to route request to peer"); @@ -1094,23 +1096,25 @@ protected void finishRebalance(final long hostId, final long futureOwnerId, fina requestToTransfer = forwardAttache.getRequestToTransfer(); } - logger.debug("Management server {} completed agent {} rebalance to {}", _nodeId, hostId, futureOwnerId); + logger.debug("Management server {} completed agent [id: {}, uuid: {}, name: {}] rebalance to {}", + _nodeId, hostId, attache.getUuid(), attache.getName(), futureOwnerId); } else { failRebalance(hostId); } - logger.debug("Management server {} completed agent {} rebalance", _nodeId, hostId); + logger.debug("Management server {} completed agent [id: {}, uuid: {}, name: {}] 
rebalance", _nodeId, hostId, attache.getUuid(), attache.getName()); _hostTransferDao.completeAgentTransfer(hostId); } protected void failRebalance(final long hostId) { + AgentAttache attache = findAttache(hostId); try { - logger.debug("Management server {} failed to rebalance agent {}", _nodeId, hostId); + logger.debug("Management server {} failed to rebalance agent {} ({})", _nodeId, hostId, attache); _hostTransferDao.completeAgentTransfer(hostId); handleDisconnectWithoutInvestigation(findAttache(hostId), Event.RebalanceFailed, true, true); } catch (final Exception ex) { - logger.warn("Failed to reconnect host id={} as a part of failed rebalance task cleanup", hostId); + logger.warn("Failed to reconnect host id={} ({}) as a part of failed rebalance task cleanup", hostId, attache); } } @@ -1138,7 +1142,7 @@ protected boolean startRebalance(final long hostId) { if (attache == null) { logger.warn("Attache for the agent {} no longer exists on management server, can't start host rebalancing", host, _nodeId); } else { - logger.warn("Attache for the agent {} has request queue size= {} and listener queue size {}, can't start host rebalancing", + logger.warn("Attache for the agent {} has request queue size {} and listener queue size {}, can't start host rebalancing", host, attache.getQueueSize(), attache.getNonRecurringListenersSize()); } return false; @@ -1174,11 +1178,12 @@ public RebalanceTask(final long hostId, final long currentOwnerId, final long fu @Override protected void runInContext() { + AgentAttache attache = findAttache(hostId); try { - logger.debug("Rebalancing host id={}", hostId); + logger.debug("Rebalancing host id={} ({})", hostId, attache); rebalanceHost(hostId, currentOwnerId, futureOwnerId); } catch (final Exception e) { - logger.warn("Unable to rebalance host id={}", hostId, e); + logger.warn("Unable to rebalance host id={} ({})", hostId, attache, e); } } } @@ -1267,7 +1272,7 @@ public String dispatch(final ClusterServicePdu pdu) { } else if 
(cmds.length == 1 && cmds[0] instanceof PropagateResourceEventCommand) { final PropagateResourceEventCommand cmd = (PropagateResourceEventCommand)cmds[0]; - logger.debug("Intercepting command to propagate event {} for host {}", cmd.getEvent().name(), cmd.getHostId()); + logger.debug("Intercepting command to propagate event {} for host {} ({})", () -> cmd.getEvent().name(), cmd::getHostId, () -> _hostDao.findById(cmd.getHostId())); boolean result = false; try { diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredDirectAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredDirectAgentAttache.java index 3d18951fb726..e36ea6cedc13 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredDirectAgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredDirectAgentAttache.java @@ -26,8 +26,8 @@ public class ClusteredDirectAgentAttache extends DirectAgentAttache implements Routable { private final long _nodeId; - public ClusteredDirectAgentAttache(ClusteredAgentManagerImpl agentMgr, long id, String name, long mgmtId, ServerResource resource, boolean maintenance) { - super(agentMgr, id, name, resource, maintenance); + public ClusteredDirectAgentAttache(ClusteredAgentManagerImpl agentMgr, long id, String uuid, String name, long mgmtId, ServerResource resource, boolean maintenance) { + super(agentMgr, id, uuid, name, resource, maintenance); _nodeId = mgmtId; } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java index c8e24301b292..523f98fd0108 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java @@ -31,8 +31,8 @@ public class ConnectedAgentAttache extends AgentAttache { protected Link _link; 
- public ConnectedAgentAttache(final AgentManagerImpl agentMgr, final long id, final String name, final Link link, final boolean maintenance) { - super(agentMgr, id, name, maintenance); + public ConnectedAgentAttache(final AgentManagerImpl agentMgr, final long id, final String uuid, final String name, final Link link, final boolean maintenance) { + super(agentMgr, id, uuid, name, maintenance); _link = link; } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java index b31348444fd0..07d5bf803932 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java @@ -51,8 +51,8 @@ public class DirectAgentAttache extends AgentAttache { AtomicInteger _outstandingTaskCount; AtomicInteger _outstandingCronTaskCount; - public DirectAgentAttache(AgentManagerImpl agentMgr, long id, String name, ServerResource resource, boolean maintenance) { - super(agentMgr, id, name, maintenance); + public DirectAgentAttache(AgentManagerImpl agentMgr, long id, String uuid, String name, ServerResource resource, boolean maintenance) { + super(agentMgr, id, uuid, name, maintenance); _resource = resource; _outstandingTaskCount = new AtomicInteger(0); _outstandingCronTaskCount = new AtomicInteger(0); @@ -60,7 +60,7 @@ public DirectAgentAttache(AgentManagerImpl agentMgr, long id, String name, Serve @Override public void disconnect(Status state) { - logger.debug("Processing disconnect {}({})", _id, _name); + logger.debug("Processing disconnect [id: {}, uuid: {}, name: {}]", _id, _uuid, _name); for (ScheduledFuture future : _futures) { future.cancel(false); @@ -115,9 +115,7 @@ public void process(Answer[] answers) { if (answers != null && answers[0] instanceof StartupAnswer) { StartupAnswer startup = (StartupAnswer)answers[0]; int interval = 
startup.getPingInterval(); - logger.info( - "StartupAnswer received [id: {} name: {} interval: {}]", - startup.getHostId(), startup.getHostName(), interval); + logger.info("StartupAnswer received [id: {}, uuid: {}, name: {}, interval: {}]", startup.getHostId(), startup.getHostUuid(), startup.getHostName(), interval); _futures.add(_agentMgr.getCronJobPool().scheduleAtFixedRate(new PingTask(), interval, interval, TimeUnit.SECONDS)); } } @@ -128,7 +126,7 @@ protected void finalize() throws Throwable { assert _resource == null : "Come on now....If you're going to dabble in agent code, you better know how to close out our resources. Ever considered why there's a method called disconnect()?"; synchronized (this) { if (_resource != null) { - logger.warn("Lost attache for {}({})", _id, _name); + logger.warn("Lost attache for [id: {}, uuid: {}, name: {}]", _id, _uuid, _name); disconnect(Status.Alert); } } @@ -142,8 +140,8 @@ private synchronized void queueTask(Task task) { } private synchronized void scheduleFromQueue() { - logger.trace("Agent attache [id: {}, name: {}], task queue size={}, outstanding tasks={}", - _id, _name, tasks.size(), _outstandingTaskCount.get()); + logger.trace("Agent attache [id: {}, uuid: {}, name: {}], task queue size={}, outstanding tasks={}", + _id, _uuid, _name, tasks.size(), _outstandingTaskCount.get()); while (!tasks.isEmpty() && _outstandingTaskCount.get() < _agentMgr.getDirectAgentThreadCap()) { _outstandingTaskCount.incrementAndGet(); _agentMgr.getDirectAgentPool().execute(tasks.remove()); @@ -156,8 +154,8 @@ protected synchronized void runInContext() { try { if (_outstandingCronTaskCount.incrementAndGet() >= _agentMgr.getDirectAgentThreadCap()) { logger.warn( - "PingTask execution for direct attache [id: {}, name: {}] has reached maximum outstanding limit({}), bailing out", - _id, _name, _agentMgr.getDirectAgentThreadCap()); + "PingTask execution for direct attache [id: {}, uuid: {}, name: {}] has reached maximum outstanding limit({}), 
bailing out", + _id, _uuid, _name, _agentMgr.getDirectAgentThreadCap()); return; } @@ -172,21 +170,21 @@ protected synchronized void runInContext() { } if (cmd == null) { - logger.warn("Unable to get current status on {}({})", _id, _name); + logger.warn("Unable to get current status on [id: {}, uuid: {}, name: {}]", _id, _uuid, _name); return; } if (cmd.getContextParam("logid") != null) { ThreadContext.put("logcontextid", cmd.getContextParam("logid")); } - logger.debug("Ping from {}({})", _id, _name); + logger.debug("Ping from [id: {}, uuid: {}, name: {}]", _id, _uuid, _name); long seq = _seq++; logger.trace("SeqA {}-{}: {}", _id, seq, new Request(_id, -1, cmd, false).toString()); _agentMgr.handleCommands(DirectAgentAttache.this, seq, new Command[] {cmd}); } else { - logger.debug("Unable to send ping because agent is disconnected {}", _id, _name); + logger.debug("Unable to send ping because agent is disconnected [id: {}, uuid: {}, name: {}]", _id, _uuid, _name); } } catch (Exception e) { logger.warn("Unable to complete the ping task", e); @@ -225,8 +223,8 @@ protected void runInContext() { try { if (_outstandingCronTaskCount.incrementAndGet() >= _agentMgr.getDirectAgentThreadCap()) { logger.warn( - "CronTask execution for direct attache [id: {}, name: {}] has reached maximum outstanding limit({}), bailing out", - _id, _name, _agentMgr.getDirectAgentThreadCap()); + "CronTask execution for direct attache [id: {}, uuid: {}, name: {}] has reached maximum outstanding limit({}), bailing out", + _id, _uuid, _name, _agentMgr.getDirectAgentThreadCap()); bailout(); return; } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/DummyAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/DummyAttache.java index 7ee524076bb8..2f15e7af43c3 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/DummyAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/DummyAttache.java @@ -22,8 +22,8 @@ public class 
DummyAttache extends AgentAttache { - public DummyAttache(AgentManagerImpl agentMgr, long id, String name, boolean maintenance) { - super(agentMgr, id, name, maintenance); + public DummyAttache(AgentManagerImpl agentMgr, long id, String uuid, String name, boolean maintenance) { + super(agentMgr, id, uuid, name, maintenance); } @Override diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 28e6d796f87f..1bf7ef5065e4 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -480,7 +480,7 @@ public void allocate(final String vmInstanceName, final VirtualMachineTemplate t final LinkedHashMap> auxiliaryNetworks, final DeploymentPlan plan, final HypervisorType hyperType, final Map> extraDhcpOptions, final Map datadiskTemplateToDiskOfferingMap) throws InsufficientCapacityException { - logger.info("allocating virtual machine from template:{} with hostname:{} and {} networks", template.getUuid(), vmInstanceName, auxiliaryNetworks.size()); + logger.info("allocating virtual machine from template: {} with hostname: {} and {} networks", template, vmInstanceName, auxiliaryNetworks.size()); VMInstanceVO persistedVm = null; try { final VMInstanceVO vm = _vmDao.findVMByInstanceName(vmInstanceName); @@ -1196,8 +1196,9 @@ public void orchestrateStart(final String vmUuid, final Map _clusterDao.findById(rootVolClusterId), () -> _clusterDao.findById(clusterIdSpecified)); throw new ResourceUnavailableException( "Root volume is ready in different cluster, Deployment plan provided cannot be satisfied, unable to create a deployment for " + vm, Cluster.class, clusterIdSpecified); @@ -1390,7 +1391,7 @@ public void orchestrateStart(final String vmUuid, final Map _vmDao.findById(vmId)); _vmDao.resetVmPowerStateTracking(vmId); } } @@ -4842,7 +4843,7 @@ 
private void handlePowerOnReportWithNoPendingJobsOnVM(final VMInstanceVO vm) { case Running: try { if (vm.getHostId() != null && !vm.getHostId().equals(vm.getPowerHostId())) { - logger.info("Detected out of band VM migration from host " + vm.getHostId() + " to host " + vm.getPowerHostId()); + logger.info("Detected out of band VM migration from host {} to host {}", () -> _hostDao.findById(vm.getHostId()), () -> _hostDao.findById(vm.getPowerHostId())); } stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId()); } catch (final NoTransitionException e) { diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java index f47abf658638..66b19205c6bd 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java @@ -1922,7 +1922,7 @@ public void cleanupConfigForServicesInNetwork(List services, final Netwo long userId = User.UID_SYSTEM; //remove all PF/Static Nat rules for the network logger.info("Services: {} are no longer supported in network: {} after applying new network offering: {} removing the related configuration", - services, network, network.getNetworkOfferingId()); + services::toString, network::toString, () -> _networkOfferingDao.findById(network.getNetworkOfferingId())); if (services.contains(Service.StaticNat.getName()) || services.contains(Service.PortForwarding.getName())) { try { if (_rulesMgr.revokeAllPFStaticNatRulesForNetwork(networkId, userId, caller)) { @@ -2088,20 +2088,20 @@ private void setHypervisorHostnameInNetwork(VirtualMachineProfile vm, DeployDest } @DB - protected void updateNic(final NicVO nic, final long networkId, final int count) { + protected void updateNic(final NicVO nic, final Network 
network, final int count) { Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(final TransactionStatus status) { _nicDao.update(nic.getId(), nic); if (nic.getVmType() == VirtualMachine.Type.User) { - logger.debug("Changing active number of nics for network id={} on {}", networkId, count); - _networksDao.changeActiveNicsBy(networkId, count); + logger.debug("Changing active number of nics for network {} on {}", network, count); + _networksDao.changeActiveNicsBy(network.getId(), count); } if (nic.getVmType() == VirtualMachine.Type.User - || nic.getVmType() == VirtualMachine.Type.DomainRouter && _networksDao.findById(networkId).getTrafficType() == TrafficType.Guest) { - _networksDao.setCheckForGc(networkId); + || nic.getVmType() == VirtualMachine.Type.DomainRouter && _networksDao.findById(network.getId()).getTrafficType() == TrafficType.Guest) { + _networksDao.setCheckForGc(network.getId()); } } }); @@ -2128,8 +2128,9 @@ public int compare(final NicVO nic1, final NicVO nic2) { for (final NicVO nic : nics) { final Pair implemented = implementNetwork(nic.getNetworkId(), dest, context, vmProfile.getVirtualMachine().getType() == Type.DomainRouter); if (implemented == null || implemented.first() == null) { - logger.warn("Failed to implement network id={} as a part of preparing nic {}", nic.getNetworkId(), nic); - throw new CloudRuntimeException(String.format("Failed to implement network id=%d as a part preparing nic %s", nic.getNetworkId(), nic)); + NetworkVO network = _networksDao.findById(nic.getNetworkId()); + logger.warn("Failed to implement network: {} as a part of preparing nic {}", network, nic); + throw new CloudRuntimeException(String.format("Failed to implement network id=%s as a part preparing nic %s", network, nic)); } final NetworkVO network = implemented.second(); @@ -2194,7 +2195,7 @@ public NicProfile prepareNic(final VirtualMachineProfile vmProfile, final Deploy Pair networks = 
getGuestNetworkRouterAndVpcDetails(vmProfile.getId()); setMtuDetailsInVRNic(networks, network, nic); } - updateNic(nic, network.getId(), 1); + updateNic(nic, network, 1); final List providersToImplement = getNetworkProviders(network.getId()); for (final NetworkElement element : networkElements) { @@ -2299,7 +2300,7 @@ public void prepareAllNicsForMigration(final VirtualMachineProfile vm, final Dep for (final NetworkElement element : networkElements) { if (providersToImplement.contains(element.getProvider())) { if (!_networkModel.isProviderEnabledInPhysicalNetwork(_networkModel.getPhysicalNetworkId(network), element.getProvider().getName())) { - throw new CloudRuntimeException("Service provider " + element.getProvider().getName() + " either doesn't exist or is not enabled in physical network id: " + network.getPhysicalNetworkId()); + throw new CloudRuntimeException(String.format("Service provider %s either doesn't exist or is not enabled in physical network: %s", element.getProvider().getName(), _physicalNetworkDao.findById(network.getPhysicalNetworkId()))); } if (element instanceof NetworkMigrationResponder) { if (!((NetworkMigrationResponder) element).prepareMigration(profile, network, vm, dest, context)) { @@ -2324,10 +2325,10 @@ public void prepareAllNicsForMigration(final VirtualMachineProfile vm, final Dep if (nic == null && !addedURIs.contains(broadcastUri.toString())) { //Nic details are not available in DB //Create nic profile for migration - logger.debug("Creating nic profile for migration. BroadcastUri: {} NetworkId: {} VM: {}", broadcastUri.toString(), ntwkId, vm); final NetworkVO network = _networksDao.findById(ntwkId); final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, network.getGuruName()); final NicProfile profile = new NicProfile(); + logger.debug("Creating nic profile for migration. 
BroadcastUri: {} NetworkId: {} VM: {}", broadcastUri.toString(), network, vm); profile.setDeviceId(255); //dummyId profile.setIPv4Address(userIp.getAddress().toString()); profile.setIPv4Netmask(publicIp.getNetmask()); @@ -2467,7 +2468,7 @@ public Pair doInTransaction(final TransactionStatus status) applyProfileToNicForRelease(nic, profile); nic.setState(Nic.State.Allocated); if (originalState == Nic.State.Reserved) { - updateNic(nic, network.getId(), -1); + updateNic(nic, network, -1); } else { _nicDao.update(nic.getId(), nic); } @@ -2476,7 +2477,7 @@ public Pair doInTransaction(final TransactionStatus status) return new Pair<>(network, profile); } else { nic.setState(Nic.State.Allocated); - updateNic(nic, network.getId(), -1); + updateNic(nic, network, -1); } } @@ -3569,10 +3570,10 @@ public void reallyRun() { final Long time = _lastNetworkIdsToFree.remove(networkId); if (time == null) { - logger.debug("We found network {} to be free for the first time. Adding it to the list: {}", networkId, currentTime); + logger.debug("We found network {} to be free for the first time. 
Adding it to the list: {}", () -> _networksDao.findById(networkId), () -> currentTime); stillFree.put(networkId, currentTime); } else if (time > currentTime - netGcWait) { - logger.debug("Network {} is still free but it's not time to shutdown yet: {}",networkId, time); + logger.debug("Network {} is still free but it's not time to shutdown yet: {}", () -> _networksDao.findById(networkId), time::toString); stillFree.put(networkId, time); } else { shutdownList.add(networkId); @@ -3599,7 +3600,7 @@ public void reallyRun() { shutdownNetwork(networkId, context, false); } catch (final Exception e) { - logger.warn("Unable to shutdown network: {}", networkId); + logger.warn("Unable to shutdown network: {}", () -> _networksDao.findById(networkId)); } } } @@ -4480,8 +4481,8 @@ public NicProfile createNicForVm(final Network network, final NicProfile request if (prepare) { final Pair implemented = implementNetwork(nic.getNetworkId(), dest, context, vmProfile.getVirtualMachine().getType() == Type.DomainRouter); if (implemented == null || implemented.first() == null) { - logger.warn("Failed to implement network id={} as a part of preparing nic {}", nic.getNetworkId(), nic); - throw new CloudRuntimeException(String.format("Failed to implement network id=%d as a part preparing nic %s", nic.getNetworkId(), nic)); + logger.warn("Failed to implement network {} as a part of preparing nic {}", network, nic); + throw new CloudRuntimeException(String.format("Failed to implement network %s as a part preparing nic %s", network, nic)); } nic = prepareNic(vmProfile, dest, context, nic.getId(), implemented.second()); logger.debug("Nic is prepared successfully for vm {} in network {}", vm, network); @@ -4702,7 +4703,7 @@ public NicVO savePlaceholderNic(final Network network, final String ip4Address, @Override public Pair importNic(final String macAddress, int deviceId, final Network network, final Boolean isDefaultNic, final VirtualMachine vm, final Network.IpAddresses ipAddresses, final 
DataCenter dataCenter, final boolean forced) throws ConcurrentOperationException, InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException { - logger.debug("Allocating nic for vm {} in network {} during import", vm.getUuid(), network); + logger.debug("Allocating nic for vm {} in network {} during import", vm, network); String selectedIp = null; if (ipAddresses != null && StringUtils.isNotEmpty(ipAddresses.getIp4Address())) { if (ipAddresses.getIp4Address().equals("auto")) { diff --git a/engine/orchestration/src/test/java/com/cloud/agent/manager/AgentManagerImplTest.java b/engine/orchestration/src/test/java/com/cloud/agent/manager/AgentManagerImplTest.java index 376e189d8751..452cfd90056f 100644 --- a/engine/orchestration/src/test/java/com/cloud/agent/manager/AgentManagerImplTest.java +++ b/engine/orchestration/src/test/java/com/cloud/agent/manager/AgentManagerImplTest.java @@ -47,7 +47,7 @@ public void setUp() throws Exception { host = new HostVO("some-Uuid"); host.setDataCenterId(1L); cmds = new StartupCommand[]{new StartupRoutingCommand()}; - attache = new ConnectedAgentAttache(null, 1L, "kvm-attache", null, false); + attache = new ConnectedAgentAttache(null, 1L, "uuid", "kvm-attache", null, false); hostDao = Mockito.mock(HostDao.class); storagePoolMonitor = Mockito.mock(Listener.class); diff --git a/engine/orchestration/src/test/java/com/cloud/agent/manager/ConnectedAgentAttacheTest.java b/engine/orchestration/src/test/java/com/cloud/agent/manager/ConnectedAgentAttacheTest.java index 3fa6d8d97296..0b42b505668a 100644 --- a/engine/orchestration/src/test/java/com/cloud/agent/manager/ConnectedAgentAttacheTest.java +++ b/engine/orchestration/src/test/java/com/cloud/agent/manager/ConnectedAgentAttacheTest.java @@ -31,8 +31,8 @@ public void testEquals() throws Exception { Link link = mock(Link.class); - ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 0, null, link, false); - ConnectedAgentAttache agentAttache2 = new 
ConnectedAgentAttache(null, 0, null, link, false); + ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 0, "uuid", null, link, false); + ConnectedAgentAttache agentAttache2 = new ConnectedAgentAttache(null, 0, "uuid", null, link, false); assertTrue(agentAttache1.equals(agentAttache2)); } @@ -42,7 +42,7 @@ public void testEqualsFalseNull() throws Exception { Link link = mock(Link.class); - ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 0, null, link, false); + ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 0, "uuid", null, link, false); assertFalse(agentAttache1.equals(null)); } @@ -53,8 +53,8 @@ public void testEqualsFalseDiffLink() throws Exception { Link link1 = mock(Link.class); Link link2 = mock(Link.class); - ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 0, null, link1, false); - ConnectedAgentAttache agentAttache2 = new ConnectedAgentAttache(null, 0, null, link2, false); + ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 0, "uuid", null, link1, false); + ConnectedAgentAttache agentAttache2 = new ConnectedAgentAttache(null, 0, "uuid", null, link2, false); assertFalse(agentAttache1.equals(agentAttache2)); } @@ -64,8 +64,8 @@ public void testEqualsFalseDiffId() throws Exception { Link link1 = mock(Link.class); - ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 1, null, link1, false); - ConnectedAgentAttache agentAttache2 = new ConnectedAgentAttache(null, 2, null, link1, false); + ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 1, "uuid", null, link1, false); + ConnectedAgentAttache agentAttache2 = new ConnectedAgentAttache(null, 2, "uuid", null, link1, false); assertFalse(agentAttache1.equals(agentAttache2)); } @@ -75,7 +75,7 @@ public void testEqualsFalseDiffClass() throws Exception { Link link1 = mock(Link.class); - ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 1, null, link1, false); + 
ConnectedAgentAttache agentAttache1 = new ConnectedAgentAttache(null, 1, "uuid", null, link1, false); assertFalse(agentAttache1.equals("abc")); } diff --git a/engine/orchestration/src/test/java/com/cloud/agent/manager/DirectAgentAttacheTest.java b/engine/orchestration/src/test/java/com/cloud/agent/manager/DirectAgentAttacheTest.java index fe9b7fafa810..65e31c271a42 100644 --- a/engine/orchestration/src/test/java/com/cloud/agent/manager/DirectAgentAttacheTest.java +++ b/engine/orchestration/src/test/java/com/cloud/agent/manager/DirectAgentAttacheTest.java @@ -26,6 +26,8 @@ import com.cloud.resource.ServerResource; +import java.util.UUID; + @RunWith(MockitoJUnitRunner.class) public class DirectAgentAttacheTest { @Mock @@ -36,9 +38,11 @@ public class DirectAgentAttacheTest { long _id = 0L; + String _uuid = UUID.randomUUID().toString(); + @Before public void setup() { - directAgentAttache = new DirectAgentAttache(_agentMgr, _id, "myDirectAgentAttache", _resource, false); + directAgentAttache = new DirectAgentAttache(_agentMgr, _id, _uuid, "myDirectAgentAttache", _resource, false); MockitoAnnotations.initMocks(directAgentAttache); } diff --git a/engine/schema/src/main/java/com/cloud/offerings/NetworkOfferingVO.java b/engine/schema/src/main/java/com/cloud/offerings/NetworkOfferingVO.java index 0bf110757d7c..73cd3d184dea 100644 --- a/engine/schema/src/main/java/com/cloud/offerings/NetworkOfferingVO.java +++ b/engine/schema/src/main/java/com/cloud/offerings/NetworkOfferingVO.java @@ -32,6 +32,7 @@ import com.cloud.network.Networks.TrafficType; import com.cloud.offering.NetworkOffering; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "network_offerings") @@ -471,8 +472,8 @@ public NetworkOfferingVO(String name, Network.GuestType guestType, boolean speci @Override public String toString() { - StringBuilder buf = new StringBuilder("[Network Offering ["); - return 
buf.append(id).append("-").append(trafficType).append("-").append(name).append("]").toString(); + return String.format("NetworkOffering %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "name", "uuid", "trafficType")); } @Override diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ObjectStoreVO.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ObjectStoreVO.java index 885cbfd98ab1..7a186b604295 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ObjectStoreVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ObjectStoreVO.java @@ -20,6 +20,7 @@ import org.apache.cloudstack.storage.object.ObjectStore; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import javax.persistence.Entity; @@ -140,4 +141,11 @@ public void setUsedBytes(Long usedBytes) { public void setDetails(Map details) { this.details = details; } + + @Override + public String toString() { + return String.format("ObjectStore %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "name", "uuid", "providerName")); + } } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/LocalHostEndpoint.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/LocalHostEndpoint.java index e3f4bcbdeca5..758bbe0c8c48 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/LocalHostEndpoint.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/LocalHostEndpoint.java @@ -73,6 +73,11 @@ public long getId() { return 0; } + @Override + public String getUuid() { + return ""; + } + @Override public String getHostAddr() { return "127.0.0.0"; diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/RemoteHostEndPoint.java 
b/engine/storage/src/main/java/org/apache/cloudstack/storage/RemoteHostEndPoint.java index fdde4ce3e624..bd4bce29b0a0 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/RemoteHostEndPoint.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/RemoteHostEndPoint.java @@ -55,6 +55,7 @@ public class RemoteHostEndPoint implements EndPoint { protected Logger logger = LogManager.getLogger(getClass()); private long hostId; + private String hostUuid; private String hostAddress; private String publicAddress; @@ -74,6 +75,7 @@ public RemoteHostEndPoint() { private void configure(Host host) { hostId = host.getId(); + hostUuid = host.getUuid(); hostAddress = host.getPrivateIpAddress(); publicAddress = host.getPublicIpAddress(); if (Host.Type.SecondaryStorageVM == host.getType()) { @@ -106,6 +108,11 @@ public long getId() { return hostId; } + @Override + public String getUuid() { + return hostUuid; + } + // used when HypervisorGuruManager choose a different host to send command private void setId(long id) { HostVO host = _hostDao.findById(id); diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImpl.java index 1ee4d40a5678..5871ecdee5aa 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImpl.java @@ -76,7 +76,7 @@ public void changeStoragePoolScopeToZone(DataStore store, ClusterScope clusterSc if (hosts != null) { for (HostVO host : hosts) { try { - storageMgr.connectHostToSharedPool(host.getId(), store.getId()); + storageMgr.connectHostToSharedPool(host, store.getId()); } catch (Exception e) { logger.warn("Unable to establish a connection 
between " + host + " and " + store, e); } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java index d734067384f8..1afc1a68b44e 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java @@ -27,6 +27,7 @@ import com.cloud.agent.api.to.NicTO; import com.cloud.alert.AlertManager; import com.cloud.configuration.ConfigurationManager; +import com.cloud.dc.dao.DataCenterDao; import com.cloud.exception.StorageConflictException; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; @@ -85,6 +86,8 @@ public class DefaultHostListener implements HypervisorHostListener { @Inject StorageService storageService; @Inject + DataCenterDao zoneDao; + @Inject NetworkOfferingDao networkOfferingDao; @Inject HostDao hostDao; @@ -134,7 +137,8 @@ public boolean hostConnect(long hostId, long poolId) throws StorageConflictExcep ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool, nfsMountOpts.first()); cmd.setWait(modifyStoragePoolCommandWait); - logger.debug("Sending modify storage pool command to agent: {} for storage pool: {} with timeout {} seconds", hostId, pool, cmd.getWait()); + HostVO host = hostDao.findById(hostId); + logger.debug("Sending modify storage pool command to agent: {} for storage pool: {} with timeout {} seconds", host, pool, cmd.getWait()); final Answer answer = agentMgr.easySend(hostId, cmd); if (answer == null) { @@ -172,7 +176,7 @@ public boolean hostConnect(long hostId, long poolId) throws StorageConflictExcep storageService.updateStorageCapabilities(poolId, false); - logger.info("Connection established between storage pool " + pool + " and host " + hostId); + 
logger.info("Connection established between storage pool {} and host {}", pool, host); return createPersistentNetworkResourcesOnHost(hostId); } @@ -260,7 +264,7 @@ private void setupPersistentNetwork(HostVO host) { } if (!answer.getResult()) { logger.error("Unable to create persistent network resources for network {} on the host {} in zone {}", - networkVO, host, networkVO.getDataCenterId()); + networkVO::toString, host::toString, () -> zoneDao.findById(networkVO.getDataCenterId())); } } } diff --git a/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImplTest.java b/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImplTest.java index 355eb075129e..538ba1a17613 100644 --- a/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImplTest.java +++ b/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImplTest.java @@ -100,7 +100,7 @@ public void testChangeStoragePoolScopeToZone() throws Exception { ReflectionTestUtils.setField(host, "id", HOST_ID); List hypervisorTypes = Arrays.asList(HypervisorType.KVM, HypervisorType.VMware); Mockito.when(resourceManager.listAllHostsInOneZoneNotInClusterByHypervisors(hypervisorTypes, ZONE_ID, CLUSTER_ID)).thenReturn(Arrays.asList(host)); - Mockito.when(storageManager.connectHostToSharedPool(HOST_ID, POOL_ID)).thenReturn(true); + Mockito.when(storageManager.connectHostToSharedPool(host, POOL_ID)).thenReturn(true); dataStoreLifeCycle.changeStoragePoolScopeToZone(store, clusterScope, null); diff --git a/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java b/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java index c78bf105c939..e26e32e7b2ef 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java +++ 
b/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java @@ -596,7 +596,7 @@ protected void runInContext() { profilerHeartbeatUpdate.start(); txn.transitToAutoManagedConnection(TransactionLegacy.CLOUD_DB); if (logger.isTraceEnabled()) { - logger.trace("Cluster manager heartbeat update, id:" + _mshostId); + logger.trace("Cluster manager heartbeat update, id: {}, mshost: {}", _mshostId, _mshost); } _mshostDao.update(_mshostId, _runId, DateUtil.currentGMTTime()); @@ -604,7 +604,7 @@ protected void runInContext() { profilerPeerScan.start(); if (logger.isTraceEnabled()) { - logger.trace("Cluster manager peer-scan, id:" + _mshostId); + logger.trace("Cluster manager peer-scan, id: {}, mshost: {}", _mshostId, _mshost); } if (!_peerScanInited) { @@ -1034,7 +1034,7 @@ private static ManagementServerHostVO getInListById(final Long id, final List() { diff --git a/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java b/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java index 82fea9749ff8..c7f2daadc518 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java +++ b/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java @@ -1051,6 +1051,10 @@ public T lockRow(ID id, Boolean lock) { } protected T findById(ID id, boolean removed, Boolean lock) { + if (id == null) { + return null; + } + StringBuilder sql = new StringBuilder(_selectByIdSql); if (!removed && _removed != null) { sql.append(" AND ").append(_removed.first()); diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java index 318ac225c8c0..0aba829f3cc7 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java @@ -135,7 +135,7 @@ public DeployDestination 
plan(VirtualMachineProfile vmProfile, DeploymentPlan pl Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); - if (_capacityMgr.checkIfHostHasCapacity(h.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { + if (_capacityMgr.checkIfHostHasCapacity(h, cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { logger.debug("Find host " + h.getId() + " has enough capacity"); DataCenter dc = _dcDao.findById(h.getDataCenterId()); Pod pod = _podDao.findById(h.getPodId()); diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java index e794cef05803..31f87d7e0442 100644 --- a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java @@ -21,6 +21,7 @@ import com.cloud.agent.api.Answer; import com.cloud.agent.api.CheckOnHostCommand; import com.cloud.agent.api.CheckVMActivityOnStoragePoolCommand; +import com.cloud.dc.dao.ClusterDao; import com.cloud.exception.StorageUnavailableException; import com.cloud.ha.HighAvailabilityManager; import com.cloud.host.Host; @@ -51,6 +52,8 @@ public class KVMHostActivityChecker extends AdapterBase implements ActivityCheckerInterface, HealthCheckerInterface { + @Inject + private ClusterDao clusterDao; @Inject private VolumeDao volumeDao; @Inject @@ -215,7 +218,7 @@ private HashMap> getVolumeUuidOnHost(Host agent) { public long[] getNeighbors(Host agent) { List neighbors = new ArrayList(); List cluster_hosts = resourceManager.listHostsInClusterByStatus(agent.getClusterId(), Status.Up); - logger.debug("Retrieving all \"Up\" hosts from cluster [{}]...", agent.getClusterId()); + 
logger.debug("Retrieving all \"Up\" hosts from cluster [{}]...", clusterDao.findById(agent.getClusterId())); for (HostVO host : cluster_hosts) { if (host.getId() == agent.getId() || (host.getHypervisorType() != Hypervisor.HypervisorType.KVM && host.getHypervisorType() != Hypervisor.HypervisorType.LXC)) { continue; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index 71be8b1a475d..be64b15c7ba3 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -523,7 +523,7 @@ private DeployDestination plan(final long nodesCount, final DataCenter zone, fin if (logger.isDebugEnabled()) { logger.debug(String.format("Checking host ID: %s for capacity already reserved %d", hostVO.getUuid(), reserved)); } - if (capacityManager.checkIfHostHasCapacity(hostVO.getId(), cpu_requested * reserved, ram_requested * reserved, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { + if (capacityManager.checkIfHostHasCapacity(hostVO, cpu_requested * reserved, ram_requested * reserved, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { logger.debug("Found host ID == '{}' to have enough capacity, CPU={} RAM={}", hostVO.getUuid(), cpu_requested * reserved, toHumanReadableSize(ram_requested * reserved)); hostEntry.setValue(new Pair(hostVO, reserved)); suitable_host_found = true; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java index 
571c97eeb70f..3f0125728c16 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java @@ -265,7 +265,7 @@ protected DeployDestination plan(final long nodesCount, final DataCenter zone, f if (logger.isDebugEnabled()) { logger.debug(String.format("Checking host : %s for capacity already reserved %d", h.getName(), reserved)); } - if (capacityManager.checkIfHostHasCapacity(h.getId(), cpu_requested * reserved, ram_requested * reserved, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { + if (capacityManager.checkIfHostHasCapacity(h, cpu_requested * reserved, ram_requested * reserved, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { logger.debug("Found host {} with enough capacity: CPU={} RAM={}", h.getName(), cpu_requested * reserved, toHumanReadableSize(ram_requested * reserved)); hostEntry.setValue(new Pair(h, reserved)); suitable_host_found = true; diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java index f6ace68e2e8a..c08d8b8b4f9b 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java @@ -286,7 +286,7 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { List poolHosts = new ArrayList(); for (HostVO h : allHosts) { try { - _storageMgr.connectHostToSharedPool(h.getId(), primarystore.getId()); + _storageMgr.connectHostToSharedPool(h, 
primarystore.getId()); poolHosts.add(h); } catch (Exception e) { logger.warn("Unable to establish a connection between " + h + " and " + primarystore, e); @@ -316,7 +316,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType h List poolHosts = new ArrayList(); for (HostVO host : hosts) { try { - _storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + _storageMgr.connectHostToSharedPool(host, dataStore.getId()); poolHosts.add(host); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); diff --git a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java index 5d7f2d86e149..38b8b0ecb7a8 100644 --- a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java @@ -377,7 +377,7 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { List poolHosts = new ArrayList(); for (HostVO h : allHosts) { try { - storageMgr.connectHostToSharedPool(h.getId(), primarystore.getId()); + storageMgr.connectHostToSharedPool(h, primarystore.getId()); poolHosts.add(h); } catch (Exception e) { logger.warn("Unable to establish a connection between " + h + " and " + primarystore, e); @@ -433,7 +433,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType h List poolHosts = new ArrayList(); for (HostVO host : hosts) { try { - storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + storageMgr.connectHostToSharedPool(host, dataStore.getId()); poolHosts.add(host); } catch (Exception e) { 
logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); diff --git a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/DateraPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/DateraPrimaryDataStoreLifeCycle.java index 6d222b36b852..04ea3141423d 100644 --- a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/DateraPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/DateraPrimaryDataStoreLifeCycle.java @@ -260,7 +260,7 @@ public boolean attachCluster(DataStore datastore, ClusterScope scope) { for (HostVO host : allHosts) { try { - _storageMgr.connectHostToSharedPool(host.getId(), primaryDataStoreInfo.getId()); + _storageMgr.connectHostToSharedPool(host, primaryDataStoreInfo.getId()); poolHosts.add(host); } catch (Exception e) { @@ -302,7 +302,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType h for (HostVO host : hosts) { try { - _storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); } diff --git a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java index 0d651fd83d74..978159699ef8 100644 --- a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java +++ 
b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java @@ -25,6 +25,9 @@ import com.cloud.agent.api.StoragePoolInfo; import com.cloud.agent.api.ValidateVcenterDetailsCommand; import com.cloud.alert.AlertManager; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.HostPodDao; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.StorageConflictException; import com.cloud.exception.StorageUnavailableException; @@ -85,6 +88,8 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor @Inject StorageManager storageMgr; + @Inject + ClusterDao clusterDao; @Inject VolumeDao volumeDao; @Inject @@ -94,6 +99,8 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor @Inject protected VirtualMachineManager vmMgr; @Inject + HostPodDao podDao; + @Inject protected SecondaryStorageVmDao _secStrgDao; @Inject UserVmDao userVmDao; @@ -102,6 +109,8 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor @Inject protected DomainRouterDao _domrDao; @Inject + DataCenterDao zoneDao; + @Inject protected StoragePoolHostDao _storagePoolHostDao; @Inject protected AlertManager _alertMgr; @@ -333,13 +342,14 @@ private void validateVcenterDetails(Long zoneId, Long podId, Long clusterId, Str return; } else { if (answer != null) { - throw new InvalidParameterValueException("Provided vCenter server details does not match with the existing vCenter in zone id: " + zoneId); + throw new InvalidParameterValueException(String.format("Provided vCenter server details does not match with the existing vCenter in zone: %s", zoneDao.findById(zoneId))); } else { logger.warn("Can not validate vCenter through host {} due to ValidateVcenterDetailsCommand returns null", h); } } } - throw new CloudRuntimeException("Could not validate vCenter details through any of the 
hosts within zone: " + zoneId + ", pod: " + podId + ", cluster: " + clusterId); + throw new CloudRuntimeException(String.format("Could not validate vCenter details through any of the hosts within zone: %s, pod: %s, cluster: %s", + zoneDao.findById(zoneId), podDao.findById(podId), clusterDao.findById(clusterId))); } protected boolean createStoragePool(HostVO host, StoragePool pool) { @@ -381,11 +391,11 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { _resourceMgr.listAllUpHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), primarystore.getDataCenterId()); if (allHosts.isEmpty()) { primaryDataStoreDao.expunge(primarystore.getId()); - throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primarystore.getClusterId()); + throw new CloudRuntimeException(String.format("No host up to associate a storage pool with in cluster %s", clusterDao.findById(primarystore.getClusterId()))); } if (primarystore.getPoolType() == StoragePoolType.OCFS2 && !_ocfs2Mgr.prepareNodes(allHosts, primarystore)) { - logger.warn("Can not create storage pool " + primarystore + " on cluster " + primarystore.getClusterId()); + logger.warn("Can not create storage pool {} on cluster {}", primarystore::toString, () -> clusterDao.findById(primarystore.getClusterId())); primaryDataStoreDao.expunge(primarystore.getId()); return false; } @@ -402,7 +412,7 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { List poolHosts = new ArrayList(); for (HostVO h : allHosts) { try { - storageMgr.connectHostToSharedPool(h.getId(), primarystore.getId()); + storageMgr.connectHostToSharedPool(h, primarystore.getId()); poolHosts.add(h); } catch (StorageConflictException se) { primaryDataStoreDao.expunge(primarystore.getId()); @@ -417,7 +427,7 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { } if (poolHosts.isEmpty()) { - logger.warn("No host can access storage pool " + primarystore + " on cluster 
" + primarystore.getClusterId()); + logger.warn("No host can access storage pool {} on cluster {}", primarystore::toString, () -> clusterDao.findById(primarystore.getClusterId())); primaryDataStoreDao.expunge(primarystore.getId()); throw new CloudRuntimeException("Failed to access storage pool"); } @@ -433,7 +443,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType h List poolHosts = new ArrayList(); for (HostVO host : hosts) { try { - storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + storageMgr.connectHostToSharedPool(host, dataStore.getId()); poolHosts.add(host); } catch (StorageConflictException se) { primaryDataStoreDao.expunge(dataStore.getId()); @@ -518,7 +528,7 @@ public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo exis DataStore dataStore = dataStoreHelper.attachHost(store, scope, existingInfo); if(existingInfo.getCapacityBytes() == 0){ try { - storageMgr.connectHostToSharedPool(scope.getScopeId(), dataStore.getId()); + storageMgr.connectHostToSharedPool(hostDao.findById(scope.getScopeId()), dataStore.getId()); } catch (StorageUnavailableException ex) { logger.error("Storage unavailable ",ex); } catch (StorageConflictException ex) { diff --git a/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java b/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java index 924c98b7912f..9c096e8eb5ac 100644 --- a/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java +++ b/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java @@ -143,7 +143,6 @@ public void initMocks() throws StorageConflictException { when(_dataStoreMgr.getDataStore(anyLong(), 
eq(DataStoreRole.Primary))).thenReturn(store); when(store.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem); when(store.isShared()).thenReturn(true); - when(store.getName()).thenReturn("newPool"); when(store.getStorageProviderName()).thenReturn("default"); @@ -179,7 +178,7 @@ public void testAttachClusterException() throws Exception { CloudRuntimeException exception = new CloudRuntimeException(exceptionString); StorageManager storageManager = Mockito.mock(StorageManager.class); - Mockito.when(storageManager.connectHostToSharedPool(Mockito.anyLong(), Mockito.anyLong())).thenThrow(exception); + Mockito.when(storageManager.connectHostToSharedPool(Mockito.any(), Mockito.anyLong())).thenThrow(exception); Mockito.when(storageManager.getStoragePoolMountFailureReason(exceptionString)).thenReturn(mountFailureReason); ReflectionTestUtils.setField(_cloudStackPrimaryDataStoreLifeCycle, "storageMgr", storageManager); diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/LinstorPrimaryDataStoreLifeCycleImpl.java b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/LinstorPrimaryDataStoreLifeCycleImpl.java index ac2563cd4368..d74d04dbb429 100644 --- a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/LinstorPrimaryDataStoreLifeCycleImpl.java +++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/LinstorPrimaryDataStoreLifeCycleImpl.java @@ -221,7 +221,7 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { try { createStoragePool(host.getId(), primaryDataStoreInfo); - _storageMgr.connectHostToSharedPool(host.getId(), primaryDataStoreInfo.getId()); + _storageMgr.connectHostToSharedPool(host, primaryDataStoreInfo.getId()); poolHosts.add(host); } catch (Exception e) { @@ -254,7 +254,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, 
HypervisorType h for (HostVO host : hosts) { try { - _storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); } diff --git a/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/lifecylce/NexentaPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/lifecylce/NexentaPrimaryDataStoreLifeCycle.java index 62995196cace..79f771721f5a 100644 --- a/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/lifecylce/NexentaPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/lifecylce/NexentaPrimaryDataStoreLifeCycle.java @@ -141,7 +141,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper for (HostVO host : hosts) { try { - _storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); } diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java index c72253f5c1cb..38f9dc20fbdc 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java @@ -269,7 +269,7 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { List poolHosts = 
new ArrayList(); for (HostVO host : hostsInCluster) { try { - if (storageMgr.connectHostToSharedPool(host.getId(), primaryDataStoreInfo.getId())) { + if (storageMgr.connectHostToSharedPool(host, primaryDataStoreInfo.getId())) { poolHosts.add(host); } } catch (Exception e) { @@ -301,7 +301,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper List poolHosts = new ArrayList(); for (HostVO host : hosts) { try { - if (storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId())) { + if (storageMgr.connectHostToSharedPool(host, dataStore.getId())) { poolHosts.add(host); } } catch (Exception e) { diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java index 29e52338a095..5fc4868902eb 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java @@ -106,7 +106,7 @@ private String getSdcIdOfHost(HostVO host, StoragePool storagePool) { details.put(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID, systemId); ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool, storagePool.getPath(), details); - ModifyStoragePoolAnswer answer = sendModifyStoragePoolCommand(cmd, storagePool, hostId); + ModifyStoragePoolAnswer answer = sendModifyStoragePoolCommand(cmd, storagePool, host); Map poolDetails = answer.getPoolInfo().getDetails(); if (MapUtils.isEmpty(poolDetails)) { String msg = String.format("PowerFlex storage SDC details not found on the host: %s, (re)install SDC and restart agent", host); @@ -147,15 +147,15 @@ private String getHostSdcId(String sdcGuid, long poolId) { } } - private ModifyStoragePoolAnswer 
sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StoragePool storagePool, long hostId) { - Answer answer = _agentMgr.easySend(hostId, cmd); + private ModifyStoragePoolAnswer sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StoragePool storagePool, HostVO host) { + Answer answer = _agentMgr.easySend(host.getId(), cmd); if (answer == null) { throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command (" + storagePool.getName() + ")"); } if (!answer.getResult()) { - String msg = "Unable to attach PowerFlex storage pool " + storagePool + " to host " + hostId; + String msg = "Unable to attach PowerFlex storage pool " + storagePool + " to host " + host.getUuid(); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg); @@ -163,7 +163,7 @@ private ModifyStoragePoolAnswer sendModifyStoragePoolCommand(ModifyStoragePoolCo " (" + storagePool.getId() + ")"); } - assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer expected ; PowerFlex Storage Pool = " + storagePool.getId() + " Host = " + hostId; + assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer expected ; PowerFlex Storage Pool = " + storagePool.getId() + " Host = " + host; return (ModifyStoragePoolAnswer) answer; } diff --git a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java index 84a0bfe9398a..a7deb8845f8c 100644 --- a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java +++ b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java @@ -152,7 +152,6 @@ public void 
testAttachZone() throws Exception { when(dataStoreMgr.getDataStore(anyLong(), eq(DataStoreRole.Primary))).thenReturn(store); when(store.getId()).thenReturn(1L); when(store.isShared()).thenReturn(true); - when(store.getName()).thenReturn("ScaleIOPool"); when(store.getStorageProviderName()).thenReturn(ScaleIOUtil.PROVIDER_NAME); when(dataStoreProviderMgr.getDataStoreProvider(ScaleIOUtil.PROVIDER_NAME)).thenReturn(dataStoreProvider); diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java index 0b5393bd8d8b..1dbbf458b489 100644 --- a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java @@ -241,7 +241,7 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { for (HostVO host : hosts) { try { - _storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); } @@ -265,7 +265,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType h for (HostVO host : hosts) { try { - _storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); } diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java 
b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java index 4877e86bf9f6..f79db2d67829 100644 --- a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java @@ -410,7 +410,7 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { for (HostVO host : allHosts) { try { - storageMgr.connectHostToSharedPool(host.getId(), primaryDataStoreInfo.getId()); + storageMgr.connectHostToSharedPool(host, primaryDataStoreInfo.getId()); poolHosts.add(host); } catch (Exception e) { diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/StorPoolPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/StorPoolPrimaryDataStoreLifeCycle.java index 4f2fdef17236..60427e65ea61 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/StorPoolPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/StorPoolPrimaryDataStoreLifeCycle.java @@ -211,7 +211,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType h List kvmHosts = resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId()); for (HostVO host : kvmHosts) { try { - storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { logger.warn(String.format("Unable to establish a connection between host %s and pool %s due to %s", host, dataStore, e)); } diff --git 
a/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java b/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java index 1bf4c31ab1c5..732c78b775e3 100644 --- a/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java +++ b/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java @@ -170,21 +170,21 @@ public boolean releaseVmCapacity(VirtualMachine vm, final boolean moveFromReserv if (hostId == null) { return true; } + HostVO host = _hostDao.findById(hostId); + return releaseVmCapacity(vm, moveFromReserved, moveToReservered, host); + } - final ServiceOfferingVO svo = _offeringsDao.findById(vm.getId(), vm.getServiceOfferingId()); - CapacityVO capacityCpu = _capacityDao.findByHostIdType(hostId, Capacity.CAPACITY_TYPE_CPU); - CapacityVO capacityMemory = _capacityDao.findByHostIdType(hostId, Capacity.CAPACITY_TYPE_MEMORY); - CapacityVO capacityCpuCore = _capacityDao.findByHostIdType(hostId, Capacity.CAPACITY_TYPE_CPU_CORE); - Long clusterId = null; - if (hostId != null) { - HostVO host = _hostDao.findById(hostId); - if (host == null) { - logger.warn("Host " + hostId + " no long exist anymore!"); - return true; - } - - clusterId = host.getClusterId(); + @DB + public boolean releaseVmCapacity(VirtualMachine vm, final boolean moveFromReserved, final boolean moveToReservered, final Host host) { + if (host == null) { + return true; } + + final ServiceOfferingVO svo = _offeringsDao.findById(vm.getId(), vm.getServiceOfferingId()); + CapacityVO capacityCpu = _capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_CPU); + CapacityVO capacityMemory = _capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_MEMORY); + CapacityVO capacityCpuCore = _capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_CPU_CORE); + Long clusterId = host.getClusterId(); if (capacityCpu == null || capacityMemory == null || svo == null || capacityCpuCore == null) { return false; } @@ -255,13 +255,13 @@ public void 
doInTransactionWithoutResult(TransactionStatus status) { } } - logger.debug("release cpu from host: " + hostId + ", old used: " + usedCpu + ",reserved: " + reservedCpu + ", actual total: " + actualTotalCpu + - ", total with overprovisioning: " + totalCpu + "; new used: " + capacityCpu.getUsedCapacity() + ",reserved:" + capacityCpu.getReservedCapacity() + - "; movedfromreserved: " + moveFromReserved + ",moveToReservered" + moveToReservered); + logger.debug("release cpu from host: {}, old used: {}, " + + "reserved: {}, actual total: {}, total with overprovisioning: {}; " + + "new used: {},reserved:{}; movedfromreserved: {},moveToReservered: {}", host, usedCpu, reservedCpu, actualTotalCpu, totalCpu, capacityCpu.getUsedCapacity(), capacityCpu.getReservedCapacity(), moveFromReserved, moveToReservered); - logger.debug("release mem from host: " + hostId + ", old used: " + toHumanReadableSize(usedMem) + ",reserved: " + toHumanReadableSize(reservedMem) + ", total: " + toHumanReadableSize(totalMem) + "; new used: " + - toHumanReadableSize(capacityMemory.getUsedCapacity()) + ",reserved:" + toHumanReadableSize(capacityMemory.getReservedCapacity()) + "; movedfromreserved: " + moveFromReserved + - ",moveToReservered" + moveToReservered); + logger.debug("release mem from host: {}, old used: {}, " + + "reserved: {}, total: {}; new used: {}, reserved: {}; " + + "movedfromreserved: {}, moveToReservered: {}", host, toHumanReadableSize(usedMem), toHumanReadableSize(reservedMem), toHumanReadableSize(totalMem), toHumanReadableSize(capacityMemory.getUsedCapacity()), toHumanReadableSize(capacityMemory.getReservedCapacity()), moveFromReserved, moveToReservered); _capacityDao.update(capacityCpu.getId(), capacityCpu); _capacityDao.update(capacityMemory.getId(), capacityMemory); @@ -331,9 +331,9 @@ public void doInTransactionWithoutResult(TransactionStatus status) { long freeMem = totalMem - (reservedMem + usedMem); if (logger.isDebugEnabled()) { - logger.debug("We are allocating VM, 
increasing the used capacity of this host:" + hostId); - logger.debug("Current Used CPU: " + usedCpu + " , Free CPU:" + freeCpu + " ,Requested CPU: " + cpu); - logger.debug("Current Used RAM: " + toHumanReadableSize(usedMem) + " , Free RAM:" + toHumanReadableSize(freeMem) + " ,Requested RAM: " + toHumanReadableSize(ram)); + logger.debug("We are allocating VM, increasing the used capacity of this host:{}", host); + logger.debug("Current Used CPU: {} , Free CPU:{} ,Requested CPU: {}", usedCpu, freeCpu, cpu); + logger.debug("Current Used RAM: {} , Free RAM:{} ,Requested RAM: {}", toHumanReadableSize(usedMem), toHumanReadableSize(freeMem), toHumanReadableSize(ram)); } capacityCpu.setUsedCapacity(usedCpu + cpu); capacityMem.setUsedCapacity(usedMem + ram); @@ -361,13 +361,19 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } } - logger.debug("CPU STATS after allocation: for host: " + hostId + ", old used: " + usedCpu + ", old reserved: " + reservedCpu + ", actual total: " + - actualTotalCpu + ", total with overprovisioning: " + totalCpu + "; new used:" + capacityCpu.getUsedCapacity() + ", reserved:" + - capacityCpu.getReservedCapacity() + "; requested cpu:" + cpu + ",alloc_from_last:" + fromLastHost); + logger.debug(String.format("CPU STATS after allocation: for host: %s, " + + "old used: %d, old reserved: %d, actual total: %d, " + + "total with overprovisioning: %d; new used: %d, reserved: %d; " + + "requested cpu: %d, alloc_from_last: %s", + host, usedCpu, reservedCpu, actualTotalCpu, totalCpu, + capacityCpu.getUsedCapacity(), capacityCpu.getReservedCapacity(), cpu, fromLastHost)); - logger.debug("RAM STATS after allocation: for host: " + hostId + ", old used: " + toHumanReadableSize(usedMem) + ", old reserved: " + toHumanReadableSize(reservedMem) + ", total: " + - toHumanReadableSize(totalMem) + "; new used: " + toHumanReadableSize(capacityMem.getUsedCapacity()) + ", reserved: " + toHumanReadableSize(capacityMem.getReservedCapacity()) + "; 
requested mem: " + toHumanReadableSize(ram) + - ",alloc_from_last:" + fromLastHost); + logger.debug("RAM STATS after allocation: for host: {}, " + + "old used: {}, old reserved: {}, total: {}; new used: {}, reserved: {}; " + + "requested mem: {}, alloc_from_last: {}", + host, toHumanReadableSize(usedMem), toHumanReadableSize(reservedMem), + toHumanReadableSize(totalMem), toHumanReadableSize(capacityMem.getUsedCapacity()), + toHumanReadableSize(capacityMem.getReservedCapacity()), toHumanReadableSize(ram), fromLastHost); long cluster_id = host.getClusterId(); ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio"); @@ -380,11 +386,11 @@ public void doInTransactionWithoutResult(TransactionStatus status) { if (hostHasCpuCapability) { // first check from reserved capacity - hostHasCapacity = checkIfHostHasCapacity(host.getId(), cpu, ram, true, cpuOvercommitRatio, memoryOvercommitRatio, true); + hostHasCapacity = checkIfHostHasCapacity(host, cpu, ram, true, cpuOvercommitRatio, memoryOvercommitRatio, true); // if not reserved, check the free capacity if (!hostHasCapacity) - hostHasCapacity = checkIfHostHasCapacity(host.getId(), cpu, ram, false, cpuOvercommitRatio, memoryOvercommitRatio, true); + hostHasCapacity = checkIfHostHasCapacity(host, cpu, ram, false, cpuOvercommitRatio, memoryOvercommitRatio, true); } if (!hostHasCapacity || !hostHasCpuCapability) { @@ -414,41 +420,40 @@ public boolean checkIfHostHasCpuCapability(long hostId, Integer cpuNum, Integer boolean isCpuSpeedGood = host.getSpeed().intValue() >= cpuSpeed; if (isCpuNumGood && isCpuSpeedGood) { if (logger.isDebugEnabled()) { - logger.debug("Host: " + hostId + " has cpu capability (cpu:" + host.getCpus() + ", speed:" + host.getSpeed() + - ") to support requested CPU: " + cpuNum + " and requested speed: " + cpuSpeed); + logger.debug("Host: {} has cpu capability (cpu:{}, speed:{}) " + + "to support requested CPU: {} and requested speed: {}", host, host.getCpus(), 
host.getSpeed(), cpuNum, cpuSpeed); } return true; } else { if (logger.isDebugEnabled()) { - logger.debug("Host: " + hostId + " doesn't have cpu capability (cpu:" + host.getCpus() + ", speed:" + host.getSpeed() + - ") to support requested CPU: " + cpuNum + " and requested speed: " + cpuSpeed); + logger.debug("Host: {} doesn't have cpu capability (cpu:{}, speed:{})" + + " to support requested CPU: {} and requested speed: {}", host, host.getCpus(), host.getSpeed(), cpuNum, cpuSpeed); } return false; } } @Override - public boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOvercommitRatio, float memoryOvercommitRatio, + public boolean checkIfHostHasCapacity(Host host, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOvercommitRatio, float memoryOvercommitRatio, boolean considerReservedCapacity) { boolean hasCapacity = false; if (logger.isDebugEnabled()) { - logger.debug("Checking if host: " + hostId + " has enough capacity for requested CPU: " + cpu + " and requested RAM: " + toHumanReadableSize(ram) + - " , cpuOverprovisioningFactor: " + cpuOvercommitRatio); + logger.debug(String.format("Checking if host: %s has enough capacity for requested CPU: %d and requested RAM: %s , cpuOverprovisioningFactor: %s", host, cpu, toHumanReadableSize(ram), cpuOvercommitRatio)); } - CapacityVO capacityCpu = _capacityDao.findByHostIdType(hostId, Capacity.CAPACITY_TYPE_CPU); - CapacityVO capacityMem = _capacityDao.findByHostIdType(hostId, Capacity.CAPACITY_TYPE_MEMORY); + CapacityVO capacityCpu = _capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_CPU); + CapacityVO capacityMem = _capacityDao.findByHostIdType(host.getId(), Capacity.CAPACITY_TYPE_MEMORY); if (capacityCpu == null || capacityMem == null) { if (capacityCpu == null) { if (logger.isDebugEnabled()) { - logger.debug("Cannot checkIfHostHasCapacity, Capacity entry for CPU not found in Db, for hostId: " + hostId); + logger.debug("Cannot 
checkIfHostHasCapacity, Capacity entry for CPU not found in Db, for host: {}", host); } } if (capacityMem == null) { if (logger.isDebugEnabled()) { - logger.debug("Cannot checkIfHostHasCapacity, Capacity entry for RAM not found in Db, for hostId: " + hostId); + logger.debug("Cannot checkIfHostHasCapacity, Capacity entry for RAM not found in Db, for host: {}", host); } } @@ -523,21 +528,15 @@ public boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolea logger.debug("Host has enough CPU and RAM available"); } - logger.debug("STATS: Can alloc CPU from host: " + hostId + ", used: " + usedCpu + ", reserved: " + reservedCpu + ", actual total: " + actualTotalCpu + - ", total with overprovisioning: " + totalCpu + "; requested cpu:" + cpu + ",alloc_from_last_host?:" + checkFromReservedCapacity + - " ,considerReservedCapacity?: " + considerReservedCapacity); + logger.debug("STATS: Can alloc CPU from host: {}, used: {}, reserved: {}, actual total: {}, total with overprovisioning: {}; requested cpu: {}, alloc_from_last_host?: {}, considerReservedCapacity?: {}", host, usedCpu, reservedCpu, actualTotalCpu, totalCpu, cpu, checkFromReservedCapacity, considerReservedCapacity); - logger.debug("STATS: Can alloc MEM from host: " + hostId + ", used: " + toHumanReadableSize(usedMem) + ", reserved: " + toHumanReadableSize(reservedMem) + ", total: " + toHumanReadableSize(totalMem) + - "; requested mem: " + toHumanReadableSize(ram) + ", alloc_from_last_host?: " + checkFromReservedCapacity + " , considerReservedCapacity?: " + considerReservedCapacity); + logger.debug("STATS: Can alloc MEM from host: {}, used: {}, reserved: {}, total: {}; requested mem: {}, alloc_from_last_host?: {}, considerReservedCapacity?: {}", host, toHumanReadableSize(usedMem), toHumanReadableSize(reservedMem), toHumanReadableSize(totalMem), toHumanReadableSize(ram), checkFromReservedCapacity, considerReservedCapacity); } else { if (checkFromReservedCapacity) { - logger.debug("STATS: Failed to alloc 
resource from host: " + hostId + " reservedCpu: " + reservedCpu + ", requested cpu: " + cpu + ", reservedMem: " + - toHumanReadableSize(reservedMem) + ", requested mem: " + toHumanReadableSize(ram)); + logger.debug("STATS: Failed to alloc resource from host: {} reservedCpu: {}, requested cpu: {}, reservedMem: {}, requested mem: {}", host, reservedCpu, cpu, toHumanReadableSize(reservedMem), toHumanReadableSize(ram)); } else { - logger.debug("STATS: Failed to alloc resource from host: " + hostId + ", reservedCpu: " + reservedCpu + ", used cpu: " + usedCpu + ", requested cpu: " + - cpu + ", actual total cpu: " + actualTotalCpu + ", total cpu with overprovisioning: " + totalCpu + ", reservedMem: " + toHumanReadableSize(reservedMem) + ", used Mem: " + - toHumanReadableSize(usedMem) + ", requested mem: " + toHumanReadableSize(ram) + ", total Mem:" + toHumanReadableSize(totalMem) + " ,considerReservedCapacity?: " + considerReservedCapacity); + logger.debug("STATS: Failed to alloc resource from host: {}, reservedCpu: {}, used cpu: {}, requested cpu: {}, actual total cpu: {}, total cpu with overprovisioning: {}, reservedMem: {}, used Mem: {}, requested mem: {}, total Mem: {}, considerReservedCapacity?: {}", host, reservedCpu, usedCpu, cpu, actualTotalCpu, totalCpu, toHumanReadableSize(reservedMem), toHumanReadableSize(usedMem), toHumanReadableSize(ram), toHumanReadableSize(totalMem), considerReservedCapacity); } if (logger.isDebugEnabled()) { @@ -654,12 +653,12 @@ public void updateCapacityForHost(final Host host, final Map vms = _vmDao.listUpByHostId(host.getId()); if (logger.isDebugEnabled()) { - logger.debug("Found " + vms.size() + " VMs on host " + host.getId()); + logger.debug("Found {} VMs on host {}", vms.size(), host); } final List vosMigrating = _vmDao.listVmsMigratingFromHost(host.getId()); if (logger.isDebugEnabled()) { - logger.debug("Found " + vosMigrating.size() + " VMs are Migrating from host " + host.getId()); + logger.debug("Found {} VMs are Migrating from 
host {}", vosMigrating.size(), host); } vms.addAll(vosMigrating); @@ -704,7 +703,7 @@ public void updateCapacityForHost(final Host host, final Map vmsByLastHostId = _vmDao.listByLastHostId(host.getId()); if (logger.isDebugEnabled()) { - logger.debug("Found " + vmsByLastHostId.size() + " VM, not running on host " + host.getId()); + logger.debug("Found {} VM, not running on host {}", vmsByLastHostId.size(), host); } for (VMInstanceVO vm : vmsByLastHostId) { Float cpuOvercommitRatio = 1.0f; @@ -770,31 +769,27 @@ public void updateCapacityForHost(final Host host, final Map t if (oldState == State.Starting) { if (newState != State.Running) { - releaseVmCapacity(vm, false, false, oldHostId); + releaseVmCapacity(vm, false, false, oldHost); } } else if (oldState == State.Running) { if (event == Event.AgentReportStopped) { - releaseVmCapacity(vm, false, true, oldHostId); + releaseVmCapacity(vm, false, true, oldHost); } else if (event == Event.AgentReportMigrated) { - releaseVmCapacity(vm, false, false, oldHostId); + releaseVmCapacity(vm, false, false, oldHost); } } else if (oldState == State.Migrating) { if (event == Event.AgentReportStopped) { /* Release capacity from original host */ - releaseVmCapacity(vm, false, false, vm.getLastHostId()); - releaseVmCapacity(vm, false, false, oldHostId); + releaseVmCapacity(vm, false, false, lastHost); + releaseVmCapacity(vm, false, false, oldHost); } else if (event == Event.OperationFailed) { /* Release from dest host */ - releaseVmCapacity(vm, false, false, oldHostId); + releaseVmCapacity(vm, false, false, oldHost); } else if (event == Event.OperationSucceeded) { - releaseVmCapacity(vm, false, false, vm.getLastHostId()); + releaseVmCapacity(vm, false, false, lastHost); } } else if (oldState == State.Stopping) { if (event == Event.OperationSucceeded) { - releaseVmCapacity(vm, false, true, oldHostId); + releaseVmCapacity(vm, false, true, oldHost); } else if (event == Event.AgentReportStopped) { - releaseVmCapacity(vm, false, false, 
oldHostId); + releaseVmCapacity(vm, false, false, oldHost); } else if (event == Event.AgentReportMigrated) { - releaseVmCapacity(vm, false, false, oldHostId); + releaseVmCapacity(vm, false, false, oldHost); } } else if (oldState == State.Stopped) { if (event == Event.DestroyRequested || event == Event.ExpungeOperation) { - releaseVmCapacity(vm, true, false, vm.getLastHostId()); + releaseVmCapacity(vm, true, false, lastHost); } else if (event == Event.AgentReportMigrated) { - releaseVmCapacity(vm, false, false, oldHostId); + releaseVmCapacity(vm, false, false, oldHost); } } @@ -1078,7 +1068,6 @@ public float getClusterOverProvisioningFactor(Long clusterId, short capacityType @Override public boolean checkIfClusterCrossesThreshold(Long clusterId, Integer cpuRequested, long ramRequested) { - Float clusterCpuOverProvisioning = getClusterOverProvisioningFactor(clusterId, Capacity.CAPACITY_TYPE_CPU); Float clusterMemoryOverProvisioning = getClusterOverProvisioningFactor(clusterId, Capacity.CAPACITY_TYPE_MEMORY); Float clusterCpuCapacityDisableThreshold = DeploymentClusterPlanner.ClusterCPUCapacityDisableThreshold.valueIn(clusterId); @@ -1086,15 +1075,13 @@ public boolean checkIfClusterCrossesThreshold(Long clusterId, Integer cpuRequest float cpuConsumption = _capacityDao.findClusterConsumption(clusterId, Capacity.CAPACITY_TYPE_CPU, cpuRequested); if (cpuConsumption / clusterCpuOverProvisioning > clusterCpuCapacityDisableThreshold) { - logger.debug("Cluster: " + clusterId + " cpu consumption " + cpuConsumption / clusterCpuOverProvisioning - + " crosses disable threshold " + clusterCpuCapacityDisableThreshold); + logger.debug("Cluster: {} cpu consumption {} crosses disable threshold {}", _clusterDao.findById(clusterId), cpuConsumption / clusterCpuOverProvisioning, clusterCpuCapacityDisableThreshold); return true; } float memoryConsumption = _capacityDao.findClusterConsumption(clusterId, Capacity.CAPACITY_TYPE_MEMORY, ramRequested); if (memoryConsumption / 
clusterMemoryOverProvisioning > clusterMemoryCapacityDisableThreshold) { - logger.debug("Cluster: " + clusterId + " memory consumption " + memoryConsumption / clusterMemoryOverProvisioning - + " crosses disable threshold " + clusterMemoryCapacityDisableThreshold); + logger.debug("Cluster: {} memory consumption {} crosses disable threshold {}", _clusterDao.findById(clusterId), memoryConsumption / clusterMemoryOverProvisioning, clusterMemoryCapacityDisableThreshold); return true; } @@ -1113,7 +1100,7 @@ public Pair checkIfHostHasCpuCapabilityAndCapacity(Host host, Float memoryOvercommitRatio = Float.parseFloat(clusterDetailsRamOvercommmt.getValue()); boolean hostHasCpuCapability = checkIfHostHasCpuCapability(host.getId(), offering.getCpu(), offering.getSpeed()); - boolean hostHasCapacity = checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, + boolean hostHasCapacity = checkIfHostHasCapacity(host, cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, considerReservedCapacity); return new Pair<>(hostHasCpuCapability, hostHasCapacity); diff --git a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java index a2399d653b94..2605e46c731f 100644 --- a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -321,8 +321,8 @@ public DeployDestination planDeployment(VirtualMachineProfile vmProfile, Deploym logger.debug("Trying to allocate a host and storage pools from datacenter [{}], " + "pod [{}], cluster [{}], to deploy VM [{}] with requested CPU [{}] and requested RAM [{}].", - dc, _podDao.findById(plan.getPodId()), _clusterDao.findById(plan.getClusterId()), - vm, cpuRequested, toHumanReadableSize(ramRequested)); + dc::toString, () -> _podDao.findById(plan.getPodId()), () -> 
_clusterDao.findById(plan.getClusterId()), + vm::toString, () -> cpuRequested, () -> toHumanReadableSize(ramRequested)); logger.debug("ROOT volume [{}] {} to deploy VM [{}].", getRootVolume(_volsDao.findByInstance(vm.getId())), @@ -439,7 +439,7 @@ public DeployDestination planDeployment(VirtualMachineProfile vmProfile, Deploym logger.debug("VM's volume encryption requirements are met by host {}", dest.getHost()); } - if (checkIfHostFitsPlannerUsage(hostId, DeploymentPlanner.PlannerResourceUsage.Shared)) { + if (checkIfHostFitsPlannerUsage(dest.getHost(), DeploymentPlanner.PlannerResourceUsage.Shared)) { // found destination return dest; } else { @@ -496,11 +496,11 @@ private DeployDestination deployInVmLastHost(VirtualMachineProfile vmProfile, De if (hostHasCpuCapability) { // first check from reserved capacity - hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpuRequested, ramRequested, true, cpuOvercommitRatio, memoryOvercommitRatio, true); + hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host, cpuRequested, ramRequested, true, cpuOvercommitRatio, memoryOvercommitRatio, true); // if not reserved, check the free capacity if (!hostHasCapacity) - hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpuRequested, ramRequested, false, cpuOvercommitRatio, memoryOvercommitRatio, true); + hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host, cpuRequested, ramRequested, false, cpuOvercommitRatio, memoryOvercommitRatio, true); } boolean displayStorage = getDisplayStorageFromVmProfile(vmProfile); @@ -992,14 +992,14 @@ private PlannerResourceUsage getPlannerUsage(DeploymentPlanner planner, VirtualM } @DB - protected boolean checkIfHostFitsPlannerUsage(final long hostId, final PlannerResourceUsage resourceUsageRequired) { + protected boolean checkIfHostFitsPlannerUsage(final Host host, final PlannerResourceUsage resourceUsageRequired) { // TODO Auto-generated method stub // check if this host has been picked up by some other 
planner // exclusively // if planner can work with shared host, check if this host has // been marked as 'shared' // else if planner needs dedicated host, - PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(hostId); + PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(host.getId()); if (reservationEntry != null) { final long id = reservationEntry.getId(); PlannerResourceUsage hostResourceType = reservationEntry.getResourceUsage(); @@ -1021,7 +1021,7 @@ protected boolean checkIfHostFitsPlannerUsage(final long hostId, final PlannerRe public Boolean doInTransaction(TransactionStatus status) { final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true); if (lockedEntry == null) { - logger.error("Unable to lock the host entry for reservation, host: " + hostId); + logger.error("Unable to lock the host entry for reservation, host: {}", host); return false; } // check before updating @@ -1050,22 +1050,22 @@ public Boolean doInTransaction(TransactionStatus status) { } @DB - public boolean checkHostReservationRelease(final Long hostId) { + public boolean checkHostReservationRelease(final Host host) { - if (hostId != null) { - PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(hostId); + if (host != null) { + PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(host.getId()); if (reservationEntry != null && reservationEntry.getResourceUsage() != null) { // check if any VMs are starting or running on this host - List vms = _vmInstanceDao.listUpByHostId(hostId); + List vms = _vmInstanceDao.listUpByHostId(host.getId()); if (vms.size() > 0) { if (logger.isDebugEnabled()) { - logger.debug("Cannot release reservation, Found " + vms.size() + " VMs Running on host " + hostId); + logger.debug("Cannot release reservation, Found {} VMs Running on host {}", vms.size(), host); } return false; } - List vmsByLastHostId = 
_vmInstanceDao.listByLastHostId(hostId); + List vmsByLastHostId = _vmInstanceDao.listByLastHostId(host.getId()); if (vmsByLastHostId.size() > 0) { // check if any VMs are within skip.counting.hours, if yes // we @@ -1074,7 +1074,7 @@ public boolean checkHostReservationRelease(final Long hostId) { long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - stoppedVM.getUpdateTime().getTime()) / 1000; if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) { if (logger.isDebugEnabled()) { - logger.debug("Cannot release reservation, Found VM: " + stoppedVM + " Stopped but reserved on host " + hostId); + logger.debug("Cannot release reservation, Found VM: {} Stopped but reserved on host {}", stoppedVM, host); } return false; } @@ -1082,10 +1082,10 @@ public boolean checkHostReservationRelease(final Long hostId) { } // check if any VMs are stopping on or migrating to this host - List vmsStoppingMigratingByHostId = _vmInstanceDao.findByHostInStates(hostId, State.Stopping, State.Migrating, State.Starting); + List vmsStoppingMigratingByHostId = _vmInstanceDao.findByHostInStates(host.getId(), State.Stopping, State.Migrating, State.Starting); if (vmsStoppingMigratingByHostId.size() > 0) { if (logger.isDebugEnabled()) { - logger.debug("Cannot release reservation, Found " + vmsStoppingMigratingByHostId.size() + " VMs stopping/migrating/starting on host " + hostId); + logger.debug("Cannot release reservation, Found {} VMs stopping/migrating/starting on host {}", vmsStoppingMigratingByHostId.size(), host); } return false; } @@ -1103,7 +1103,7 @@ public boolean checkHostReservationRelease(final Long hostId) { } if (logger.isDebugEnabled()) { - logger.debug("Host has no VMs associated, releasing the planner reservation for host " + hostId); + logger.debug("Host has no VMs associated, releasing the planner reservation for host {}", host); } final long id = reservationEntry.getId(); @@ -1113,7 +1113,7 @@ public boolean checkHostReservationRelease(final Long hostId) { 
public Boolean doInTransaction(TransactionStatus status) { final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true); if (lockedEntry == null) { - logger.error("Unable to lock the host entry for reservation, host: " + hostId); + logger.error("Unable to lock the host entry for reservation, host: {}", host); return false; } // check before updating @@ -1151,7 +1151,7 @@ private void checkHostReservations() { for (PlannerHostReservationVO hostReservation : reservedHosts) { HostVO host = _hostDao.findById(hostReservation.getHostId()); if (host != null && host.getManagementServerId() != null && host.getManagementServerId() == _nodeId) { - checkHostReservationRelease(hostReservation.getHostId()); + checkHostReservationRelease(host); } } @@ -1234,10 +1234,9 @@ public boolean configure(final String name, final Map params) th @Override public void onPublishMessage(String senderAddress, String subject, Object obj) { VMInstanceVO vm = ((VMInstanceVO)obj); - logger.debug("MessageBus message: host reserved capacity released for VM: " + vm.getLastHostId() + - ", checking if host reservation can be released for host:" + vm.getLastHostId()); - Long hostId = vm.getLastHostId(); - checkHostReservationRelease(hostId); + Host host = _hostDao.findById(vm.getLastHostId()); + logger.debug("MessageBus message: host reserved capacity released for VM: {}, checking if host reservation can be released for host:{}", vm, host); + checkHostReservationRelease(host); } }); @@ -1614,7 +1613,7 @@ public int compare(Volume v1, Volume v2) { boolean hostHasEncryption = Boolean.parseBoolean(potentialHostVO.getDetail(Host.HOST_VOLUME_ENCRYPTION)); boolean hostMeetsEncryptionRequirements = !anyVolumeRequiresEncryption(new ArrayList<>(volumesOrderBySizeDesc)) || hostHasEncryption; - boolean hostFitsPlannerUsage = checkIfHostFitsPlannerUsage(potentialHost.getId(), resourceUsageRequired); + boolean hostFitsPlannerUsage = checkIfHostFitsPlannerUsage(potentialHost, 
resourceUsageRequired); if (hostCanAccessPool && haveEnoughSpace && hostAffinityCheck && hostMeetsEncryptionRequirements && hostFitsPlannerUsage) { logger.debug("Found a potential host {} and associated storage pools for this VM", potentialHost); @@ -1782,7 +1781,7 @@ protected Pair>, List> findSuitablePoolsFo Boolean useLocalStorageForSystemVM = ConfigurationManagerImpl.SystemVMUseLocalStorage.valueIn(zone.getId()); if (useLocalStorageForSystemVM != null) { useLocalStorage = useLocalStorageForSystemVM.booleanValue(); - logger.debug("System VMs will use " + (useLocalStorage ? "local" : "shared") + " storage for zone id=" + plan.getDataCenterId()); + logger.debug("System VMs will use {} storage for zone {}", useLocalStorage ? "local" : "shared", zone); } } else { useLocalStorage = diskOffering.isUseLocalStorage(); @@ -1846,7 +1845,6 @@ private boolean tryToFindPotentialPoolsToAlocateVolume(VirtualMachineProfile vmP private boolean checkIfPoolCanBeReused(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, Map> suitableVolumeStoragePools, List readyAndReusedVolumes, VolumeVO toBeCreated) { - logger.debug("Volume [{}] of VM [{}] has pool [{}] already specified. Checking if this pool can be reused.", toBeCreated, vmProfile, toBeCreated.getPoolId()); List suitablePools = new ArrayList<>(); StoragePool pool = null; if (toBeCreated.getPoolId() != null) { @@ -1855,6 +1853,8 @@ private boolean checkIfPoolCanBeReused(VirtualMachineProfile vmProfile, Deployme pool = (StoragePool)dataStoreMgr.getPrimaryDataStore(plan.getPoolId()); } + logger.debug("Volume [{}] of VM [{}] has pool [{}] already specified. 
Checking if this pool can be reused.", toBeCreated, vmProfile, pool); + if (!pool.isInMaintenance()) { if (!avoid.shouldAvoid(pool)) { return canReusePool(vmProfile, plan, suitableVolumeStoragePools, readyAndReusedVolumes, toBeCreated, suitablePools, pool); diff --git a/server/src/main/java/com/cloud/network/NetworkUsageManagerImpl.java b/server/src/main/java/com/cloud/network/NetworkUsageManagerImpl.java index 04f7c0e72564..0aee5f234969 100644 --- a/server/src/main/java/com/cloud/network/NetworkUsageManagerImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkUsageManagerImpl.java @@ -263,8 +263,13 @@ public boolean isRecurring() { } @Override - @DB public boolean processAnswers(long agentId, long seq, Answer[] answers) { + return processAnswers(agentId, null, null, seq, answers); + } + + @Override + @DB + public boolean processAnswers(long agentId, String uuid, String name, long seq, Answer[] answers) { /* * Do not collect Direct Network usage stats if the Traffic Monitor is not owned by this mgmt server */ @@ -275,7 +280,7 @@ public boolean processAnswers(long agentId, long seq, Answer[] answers) { return false; } } else { - logger.warn("Agent not found. Not collecting Direct Network usage from TrafficMonitor : " + agentId); + logger.warn("Agent not found. 
Not collecting Direct Network usage from TrafficMonitor [id: {}, uuid: {}, name: {}]", agentId, uuid, name); return false; } @@ -472,8 +477,13 @@ public AgentControlAnswer processControlCommand(long agentId, AgentControlComman @Override public boolean processDisconnect(long agentId, Status state) { + return processDisconnect(agentId, null, null, state); + } + + @Override + public boolean processDisconnect(long agentId, String uuid, String name, Status state) { if (logger.isDebugEnabled()) { - logger.debug("Disconnected called on " + agentId + " with status " + state.toString()); + logger.debug("Disconnected called on [id: {}, uuid: {}, name: {}] with status {}", agentId, uuid, name, state.toString()); } return true; } } diff --git a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index 5f86c0e17210..e171b68399bf 100644 --- a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -493,7 +493,9 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { _userStatsDao.update(userStats.getId(), userStats); logger.debug("Successfully updated user statistics as a part of domR " + router + " reboot/stop"); } else { - logger.warn("User stats were not created for account " + router.getAccountId() + " and dc " + router.getDataCenterId()); + DataCenterVO zone = _dcDao.findById(router.getDataCenterId()); + Account account = _accountMgr.getAccount(router.getAccountId()); + logger.warn("User stats for router {} were not created for account {} and dc {}", router, account, zone); } } } @@ -1194,7 +1196,7 @@ private List getFailingChecks(DomainRouterVO router, GetRouterMonitorRes } } else { resetRouterHealthChecksAndConnectivity(router.getId(), true, true, "Successfully fetched data"); - 
updateDbHealthChecksFromRouterResponse(router.getId(), answer.getMonitoringResults()); + updateDbHealthChecksFromRouterResponse(router, answer.getMonitoringResults()); return answer.getFailingChecks(); } } @@ -1238,7 +1240,7 @@ private void handleFailingChecks(DomainRouterVO router, List failingChec if (recreateRouter) { logger.warn("Health Check Alert: Found failing checks in " + RouterHealthChecksFailuresToRecreateVrCK + ", attempting recreating router."); - recreateRouter(router.getId()); + recreateRouter(router); } } @@ -1295,7 +1297,8 @@ private boolean restartGuestNetworkInDomainRouter(DomainRouterJoinVO router, Use * @param routerId - the id of the router to be recreated. * @return true if successfully restart is attempted else false. */ - private boolean recreateRouter(long routerId) { + private boolean recreateRouter(DomainRouterVO router) { + long routerId = router.getId(); User systemUser = _userDao.getUser(User.UID_SYSTEM); // Find any VPC containing router join VO, restart it and return @@ -1310,7 +1313,7 @@ private boolean recreateRouter(long routerId) { return restartGuestNetworkInDomainRouter(routerJoinToRestart, systemUser); } - logger.warn("Unable to find a valid guest network or VPC to restart for recreating router id " + routerId); + logger.warn("Unable to find a valid guest network or VPC to restart for recreating router {}", router); return false; } @@ -1407,8 +1410,8 @@ private RouterHealthCheckResultVO parseHealthCheckVOFromJson(final long routerId * @return converts the above JSON into list of RouterHealthCheckResult. 
*/ private List parseHealthCheckResults( - final Map>> checksJson, final long routerId) { - final Map> checksInDb = getHealthChecksFromDb(routerId); + final Map>> checksJson, final DomainRouterVO router) { + final Map> checksInDb = getHealthChecksFromDb(router.getId()); List healthChecks = new ArrayList<>(); final String lastRunKey = "lastRun"; for (String checkType : checksJson.keySet()) { @@ -1425,28 +1428,27 @@ private List parseHealthCheckResults( try { final RouterHealthCheckResultVO hcVo = parseHealthCheckVOFromJson( - routerId, checkName, checkType, checksJson.get(checkType).get(checkName), checksInDb); + router.getId(), checkName, checkType, checksJson.get(checkType).get(checkName), checksInDb); healthChecks.add(hcVo); } catch (Exception ex) { - logger.error("Skipping health check: Exception while parsing check result data for router id " + routerId + - ", check type: " + checkType + ", check name: " + checkName + ":" + ex.getLocalizedMessage(), ex); + logger.error("Skipping health check: Exception while parsing check result data for router {}, check type: {}, check name: {}:{}", router, checkType, checkName, ex.getLocalizedMessage(), ex); } } } return healthChecks; } - private List updateDbHealthChecksFromRouterResponse(final long routerId, final String monitoringResult) { + private List updateDbHealthChecksFromRouterResponse(final DomainRouterVO router, final String monitoringResult) { if (StringUtils.isBlank(monitoringResult)) { - logger.warn("Attempted parsing empty monitoring results string for router " + routerId); + logger.warn("Attempted parsing empty monitoring results string for router {}", router); return Collections.emptyList(); } try { - logger.debug("Parsing and updating DB health check data for router: " + routerId + " with data: " + monitoringResult) ; + logger.debug("Parsing and updating DB health check data for router: {} with data: {}", router, monitoringResult); final Type t = new TypeToken>>>() {}.getType(); final Map>> checks = 
GsonHelper.getGson().fromJson(monitoringResult, t); - return parseHealthCheckResults(checks, routerId); + return parseHealthCheckResults(checks, router); } catch (JsonSyntaxException ex) { logger.error("Unable to parse the result of health checks due to " + ex.getLocalizedMessage(), ex); } @@ -2882,23 +2884,24 @@ public boolean startRemoteAccessVpn(final Network network, final RemoteAccessVpn Answer answer = cmds.getAnswer("users"); if (answer == null) { - logger.error("Unable to start vpn: unable add users to vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " - + router.getInstanceName() + " due to null answer"); - throw new ResourceUnavailableException("Unable to start vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " - + router.getInstanceName() + " due to null answer", DataCenter.class, router.getDataCenterId()); + DataCenterVO zone = _dcDao.findById(router.getDataCenterId()); + Account account = _accountMgr.getAccount(vpn.getAccountId()); + logger.error("Unable to start vpn {} : unable to add users to vpn in zone {} for account {} on domR: {} due to null answer", vpn, zone, account, router.getInstanceName()); + throw new ResourceUnavailableException(String.format("Unable to start vpn %s in zone %s for account %s on domR: %s due to null answer", vpn, zone, account, router.getInstanceName()), DataCenter.class, router.getDataCenterId()); } if (!answer.getResult()) { - logger.error("Unable to start vpn: unable add users to vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " - + router.getInstanceName() + " due to " + answer.getDetails()); - throw new ResourceUnavailableException("Unable to start vpn: Unable to add users to vpn in zone " + router.getDataCenterId() + " for account " - + vpn.getAccountId() + " on domR: " + router.getInstanceName() + " due to " + answer.getDetails(), DataCenter.class, router.getDataCenterId()); + 
DataCenterVO zone = _dcDao.findById(router.getDataCenterId()); + Account account = _accountMgr.getAccount(vpn.getAccountId()); + logger.error("Unable to start vpn {} : unable to add users to vpn in zone {} for account {} on domR: {} due to {}", vpn, zone, account, router.getInstanceName(), answer.getDetails()); + throw new ResourceUnavailableException(String.format("Unable to start vpn %s : Unable to add users to vpn in zone %s for account %s on domR: %s due to %s", vpn, zone, account, router.getInstanceName(), answer.getDetails()), DataCenter.class, router.getDataCenterId()); } answer = cmds.getAnswer("startVpn"); if (!answer.getResult()) { - logger.error("Unable to start vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() - + " due to " + answer.getDetails()); - throw new ResourceUnavailableException("Unable to start vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " - + router.getInstanceName() + " due to " + answer.getDetails(), DataCenter.class, router.getDataCenterId()); + DataCenterVO zone = _dcDao.findById(router.getDataCenterId()); + Account account = _accountMgr.getAccount(vpn.getAccountId()); + + logger.error("Unable to start vpn {} in zone {} for account {} on domR: {} due to {}", vpn, zone, account, router.getInstanceName(), answer.getDetails()); + throw new ResourceUnavailableException(String.format("Unable to start vpn %s in zone %s for account %s on domR: %s due to %s", vpn, zone, account, router.getInstanceName(), answer.getDetails()), DataCenter.class, router.getDataCenterId()); } } @@ -3029,8 +3032,9 @@ public VirtualRouter startRouter(final long routerId, final boolean reprogramNet for (final NicVO nic : nics) { if (!_networkMgr.startNetwork(nic.getNetworkId(), dest, context)) { - logger.warn("Failed to start network id=" + nic.getNetworkId() + " as a part of domR start"); - throw new CloudRuntimeException("Failed to start network id=" 
+ nic.getNetworkId() + " as a part of domR start"); + NetworkVO network = _networkDao.findById(nic.getNetworkId()); + logger.warn("Failed to start network {} as a part of domR start", network); + throw new CloudRuntimeException(String.format("Failed to start network %s as a part of domR start", network)); } } @@ -3196,14 +3200,13 @@ public void collectNetworkStatistics(final T router, f try { answer = (NetworkUsageAnswer) _agentMgr.easySend(router.getHostId(), usageCmd); } catch (final Exception e) { - logger.warn("Error while collecting network stats from router: " + router.getInstanceName() + " from host: " + router.getHostId(), e); + logger.warn("Error while collecting network stats from router: {} from host: {}", router, router.getHostId(), e); continue; } if (answer != null) { if (!answer.getResult()) { - logger.warn("Error while collecting network stats from router: " + router.getInstanceName() + " from host: " + router.getHostId() + "; details: " - + answer.getDetails()); + logger.warn("Error while collecting network stats from router: {} from host: {}; details: {}", router, router.getHostId(), answer.getDetails()); continue; } try { @@ -3219,7 +3222,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { final UserStatisticsVO stats = _userStatsDao.lock(router.getAccountId(), router.getDataCenterId(), network.getId(), forVpc ? 
routerNic.getIPv4Address() : null, router.getId(), routerType); if (stats == null) { - logger.warn("unable to find stats for account: " + router.getAccountId()); + logger.warn("unable to find stats for account: {}", () -> _accountMgr.getAccount(router.getAccountId())); return; } @@ -3256,8 +3259,8 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { } }); } catch (final Exception e) { - logger.warn("Unable to update user statistics for account: " + router.getAccountId() + " Rx: " + toHumanReadableSize(answer.getBytesReceived()) + "; Tx: " - + toHumanReadableSize(answer.getBytesSent())); + logger.warn("Unable to update user statistics for account: {} Rx: {}; Tx: {}", + _accountMgr.getAccount(router.getAccountId()), toHumanReadableSize(answer.getBytesReceived()), toHumanReadableSize(answer.getBytesSent())); } } } diff --git a/server/src/main/java/com/cloud/network/security/SecurityGroupListener.java b/server/src/main/java/com/cloud/network/security/SecurityGroupListener.java index 0e743b496f84..067f2fbdbb26 100644 --- a/server/src/main/java/com/cloud/network/security/SecurityGroupListener.java +++ b/server/src/main/java/com/cloud/network/security/SecurityGroupListener.java @@ -81,23 +81,26 @@ public boolean isRecurring() { @Override public boolean processAnswers(long agentId, long seq, Answer[] answers) { + return processAnswers(agentId, null, null, seq, answers); + } + + @Override + public boolean processAnswers(long agentId, String uuid, String name, long seq, Answer[] answers) { List affectedVms = new ArrayList(); for (Answer ans : answers) { if (ans instanceof SecurityGroupRuleAnswer) { SecurityGroupRuleAnswer ruleAnswer = (SecurityGroupRuleAnswer)ans; if (ans.getResult()) { - logger.debug("Successfully programmed rule " + ruleAnswer.toString() + " into host " + agentId); + logger.debug("Successfully programmed rule {} into host [id: {}, uuid: {}, name: {}]", ruleAnswer.toString(), agentId, uuid, name); 
_workDao.updateStep(ruleAnswer.getVmId(), ruleAnswer.getLogSequenceNumber(), Step.Done); recordSuccess(ruleAnswer.getVmId()); } else { _workDao.updateStep(ruleAnswer.getVmId(), ruleAnswer.getLogSequenceNumber(), Step.Error); ; - logger.debug("Failed to program rule " + ruleAnswer.toString() + " into host " + agentId + " due to " + ruleAnswer.getDetails() + - " and updated jobs"); + logger.debug("Failed to program rule {} into host [id: {}, uuid: {}, name: {}] due to {} and updated jobs", ruleAnswer.toString(), agentId, uuid, name, ruleAnswer.getDetails()); if (ruleAnswer.getReason() == FailureReason.CANNOT_BRIDGE_FIREWALL) { - logger.debug("Not retrying security group rules for vm " + ruleAnswer.getVmId() + " on failure since host " + agentId + - " cannot do bridge firewalling"); + logger.debug("Not retrying security group rules for vm {} on failure since host [id: {}, uuid: {}, name: {}] cannot do bridge firewalling", ruleAnswer.getVmId(), agentId, uuid, name); } else if (ruleAnswer.getReason() == FailureReason.PROGRAMMING_FAILED) { if (checkShouldRetryOnFailure(ruleAnswer.getVmId())) { logger.debug("Retrying security group rules on failure for vm " + ruleAnswer.getVmId()); diff --git a/server/src/main/java/com/cloud/resource/RollingMaintenanceManagerImpl.java b/server/src/main/java/com/cloud/resource/RollingMaintenanceManagerImpl.java index c7bdf9c6f6c9..505516d107be 100644 --- a/server/src/main/java/com/cloud/resource/RollingMaintenanceManagerImpl.java +++ b/server/src/main/java/com/cloud/resource/RollingMaintenanceManagerImpl.java @@ -643,7 +643,7 @@ private Pair performCapacityChecksBeforeHostInMaintenance(Host ClusterDetailsVO clusterDetailsRamOvercommmt = clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio"); Float cpuOvercommitRatio = Float.parseFloat(clusterDetailsCpuOvercommit.getValue()); Float memoryOvercommitRatio = Float.parseFloat(clusterDetailsRamOvercommmt.getValue()); - boolean hostHasCapacity = 
capacityManager.checkIfHostHasCapacity(hostInCluster.getId(), cpuRequested, ramRequested, false, + boolean hostHasCapacity = capacityManager.checkIfHostHasCapacity(hostInCluster, cpuRequested, ramRequested, false, cpuOvercommitRatio, memoryOvercommitRatio, false); if (!maxGuestLimit && hostHasCPUCapacity && hostHasCapacity) { canMigrateVm = true; diff --git a/server/src/main/java/com/cloud/storage/ImageStoreUploadMonitorImpl.java b/server/src/main/java/com/cloud/storage/ImageStoreUploadMonitorImpl.java index e0364681426d..334e9f108356 100755 --- a/server/src/main/java/com/cloud/storage/ImageStoreUploadMonitorImpl.java +++ b/server/src/main/java/com/cloud/storage/ImageStoreUploadMonitorImpl.java @@ -239,7 +239,7 @@ protected void runInContext() { answer = new UploadStatusAnswer(cmd, UploadStatus.UNKNOWN, e.getMessage()); } if (answer == null || !(answer instanceof UploadStatusAnswer)) { - logger.warn("No or invalid answer corresponding to UploadStatusCommand for volume " + volumeDataStore.getVolumeId()); + logger.warn("No or invalid answer corresponding to UploadStatusCommand for volume {}", volume); continue; } handleVolumeStatusResponse((UploadStatusAnswer)answer, volume, volumeDataStore); diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index f2e03cddb7cd..b5a206af2892 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -436,7 +436,7 @@ public boolean share(VMInstanceVO vm, List vols, HostVO host, boolean // available for (VolumeVO vol : vols) { if (vol.getRemoved() != null) { - logger.warn("Volume id:" + vol.getId() + " is removed, cannot share on this instance"); + logger.warn("Volume: {} is removed, cannot share on this instance: {}", vol, vm); // not ok to share return false; } @@ -988,7 +988,7 @@ public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) throws 
Resource // Check if zone is disabled Account account = CallContext.current().getCallingAccount(); if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(account.getId())) { - throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId); + throw new PermissionDeniedException(String.format("Cannot perform this operation, Zone is currently disabled: %s", zone)); } Map params = new HashMap<>(); @@ -1137,7 +1137,7 @@ public StoragePool disablePrimaryStoragePool(Long id) { throw new IllegalArgumentException(String.format("Unable to find storage pool with ID: %d", id)); } if (!primaryStorage.getStatus().equals(StoragePoolStatus.Up)) { - throw new InvalidParameterValueException("Primary storage with id " + primaryStorage.getId() + " cannot be disabled. Storage pool state : " + primaryStorage.getStatus().toString()); + throw new InvalidParameterValueException(String.format("Primary storage %s cannot be disabled. Storage pool state : %s", primaryStorage, primaryStorage.getStatus().toString())); } DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName()); @@ -1156,7 +1156,7 @@ public StoragePool enablePrimaryStoragePool(Long id) { throw new IllegalArgumentException(String.format("Unable to find storage pool with ID: %d", id)); } if (!primaryStorage.getStatus().equals(StoragePoolStatus.Disabled)) { - throw new InvalidParameterValueException("Primary storage with id " + primaryStorage.getId() + " cannot be enabled. Storage pool state : " + primaryStorage.getStatus().toString()); + throw new InvalidParameterValueException(String.format("Primary storage %s cannot be enabled. 
Storage pool state : %s", primaryStorage, primaryStorage.getStatus())); } DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName()); @@ -1382,7 +1382,7 @@ public void removeStoragePoolFromCluster(long hostId, String iScsiName, StorageP final Answer answer = _agentMgr.easySend(hostId, cmd); if (answer == null || !answer.getResult()) { - String errMsg = "Error interacting with host (related to DeleteStoragePoolCommand)" + (StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : ""); + String errMsg = "Error interacting with host (related to DeleteStoragePoolCommand)" + (answer == null ? "" : (StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : "")); logger.error(errMsg); @@ -1402,8 +1402,8 @@ public boolean deletePool(DeletePoolCmd cmd) { throw new InvalidParameterValueException("Unable to find pool by id " + id); } if (sPool.getStatus() != StoragePoolStatus.Maintenance) { - logger.warn("Unable to delete storage id: " + id + " due to it is not in Maintenance state"); - throw new InvalidParameterValueException("Unable to delete storage due to it is not in Maintenance state, id: " + id); + logger.warn("Unable to delete storage pool: {} due to it is not in Maintenance state", sPool); + throw new InvalidParameterValueException(String.format("Unable to delete storage due to it is not in Maintenance state, pool: %s", sPool)); } if (sPool.getPoolType() == StoragePoolType.DatastoreCluster) { @@ -1419,8 +1419,8 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } }); } else { - logger.debug("Cannot delete storage pool {} as the following non-destroyed volumes are on it: {}.", sPool::getName, () -> getStoragePoolNonDestroyedVolumesLog(sPool.getId())); - throw new CloudRuntimeException(String.format("Cannot delete pool %s as there are non-destroyed volumes associated to this pool.", sPool.getName())); + logger.debug("Cannot delete storage pool {} as the 
following non-destroyed volumes are on it: {}.", sPool::toString, () -> getStoragePoolNonDestroyedVolumesLog(sPool.getId())); + throw new CloudRuntimeException(String.format("Cannot delete pool %s as there are associated non-destroyed vols for this pool", sPool)); } } return deleteDataStoreInternal(sPool, forced); @@ -1481,8 +1481,8 @@ private boolean deleteDataStoreInternal(StoragePoolVO sPool, boolean forced) { if (vlms.first() > 0) { Pair nonDstrdVlms = volumeDao.getNonDestroyedCountAndTotalByPool(sPool.getId()); if (nonDstrdVlms.first() > 0) { - logger.debug("Cannot delete storage pool {} as the following non-destroyed volumes are on it: {}.", sPool::getName, () -> getStoragePoolNonDestroyedVolumesLog(sPool.getId())); - throw new CloudRuntimeException(String.format("Cannot delete pool %s as there are non-destroyed volumes associated to this pool.", sPool.getName())); + logger.debug("Cannot delete storage pool {} as the following non-destroyed volumes are on it: {}.", sPool::toString, () -> getStoragePoolNonDestroyedVolumesLog(sPool.getId())); + throw new CloudRuntimeException(String.format("Cannot delete pool %s as there are non-destroyed volumes associated to this pool.", sPool)); } // force expunge non-destroyed volumes List vols = volumeDao.listVolumesToBeDestroyed(); @@ -1491,7 +1491,7 @@ private boolean deleteDataStoreInternal(StoragePoolVO sPool, boolean forced) { try { future.get(); } catch (InterruptedException | ExecutionException e) { - logger.debug("expunge volume failed:" + vol.getId(), e); + logger.debug("expunge volume failed: {}", vol, e); } } } @@ -1499,8 +1499,8 @@ private boolean deleteDataStoreInternal(StoragePoolVO sPool, boolean forced) { // Check if the pool has associated volumes in the volumes table // If it does , then you cannot delete the pool if (vlms.first() > 0) { - logger.debug("Cannot delete storage pool {} as the following non-destroyed volumes are on it: {}.", sPool::getName, () -> 
getStoragePoolNonDestroyedVolumesLog(sPool.getId())); - throw new CloudRuntimeException(String.format("Cannot delete pool %s as there are non-destroyed volumes associated to this pool.", sPool.getName())); + logger.debug("Cannot delete storage pool {} as the following non-destroyed volumes are on it: {}.", sPool::toString, () -> getStoragePoolNonDestroyedVolumesLog(sPool.getId())); + throw new CloudRuntimeException(String.format("Cannot delete pool %s as there are non-destroyed volumes associated to this pool.", sPool)); } } @@ -1509,13 +1509,13 @@ private boolean deleteDataStoreInternal(StoragePoolVO sPool, boolean forced) { if (lock == null) { if (logger.isDebugEnabled()) { - logger.debug("Failed to acquire lock when deleting PrimaryDataStoreVO with ID: " + sPool.getId()); + logger.debug("Failed to acquire lock when deleting PrimaryDataStoreVO: {}", sPool); } return false; } _storagePoolDao.releaseFromLockTable(lock.getId()); - logger.trace("Released lock for storage pool " + sPool.getId()); + logger.trace("Released lock for storage pool {}", sPool); DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(sPool.getStorageProviderName()); DataStoreLifeCycle lifeCycle = storeProvider.getDataStoreLifeCycle(); @@ -1541,21 +1541,21 @@ protected String getStoragePoolNonDestroyedVolumesLog(long storagePoolId) { } @Override - public boolean connectHostToSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException { + public boolean connectHostToSharedPool(Host host, long poolId) throws StorageUnavailableException, StorageConflictException { StoragePool pool = (StoragePool)_dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); assert (pool.isShared()) : "Now, did you actually read the name of this method?"; - logger.debug("Adding pool " + pool.getName() + " to host " + hostId); + logger.debug("Adding pool {} to host {}", pool, host); DataStoreProvider provider = 
_dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); HypervisorHostListener listener = hostListeners.get(provider.getName()); - return listener.hostConnect(hostId, pool.getId()); + return listener.hostConnect(host.getId(), pool.getId()); } @Override public void disconnectHostFromSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException { StoragePool pool = (StoragePool)_dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); assert (pool.isShared()) : "Now, did you actually read the name of this method?"; - logger.debug("Removing pool " + pool.getName() + " from host " + hostId); + logger.debug("Removing pool {} from host {}", pool::toString, () -> _hostDao.findById(hostId)); DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); HypervisorHostListener listener = hostListeners.get(provider.getName()); @@ -1601,14 +1601,14 @@ public void createCapacityEntry(StoragePoolVO storagePool, short capacityType, l // All this is for the inaccuracy of floats for big number multiplication. 
BigDecimal overProvFactor = getStorageOverProvisioningFactor(storagePool.getId()); totalOverProvCapacity = overProvFactor.multiply(new BigDecimal(storagePool.getCapacityBytes())).longValue(); - logger.debug("Found storage pool " + storagePool.getName() + " of type " + storagePool.getPoolType().toString() + " with overprovisioning factor " + overProvFactor.toString()); - logger.debug("Total over provisioned capacity calculated is " + overProvFactor + " * " + toHumanReadableSize(storagePool.getCapacityBytes())); + logger.debug("Found storage pool {} of type {} with overprovisioning factor {}", storagePool, storagePool.getPoolType(), overProvFactor); + logger.debug("Total over provisioned capacity calculated is {} * {}", overProvFactor, toHumanReadableSize(storagePool.getCapacityBytes())); } else { - logger.debug("Found storage pool " + storagePool.getName() + " of type " + storagePool.getPoolType().toString()); + logger.debug("Found storage pool {} of type {}", storagePool, storagePool.getPoolType()); totalOverProvCapacity = storagePool.getCapacityBytes(); } - logger.debug("Total over provisioned capacity of the pool " + storagePool.getName() + " id: " + storagePool.getId() + " is " + toHumanReadableSize(totalOverProvCapacity)); + logger.debug("Total over provisioned capacity of the pool {} is {}", storagePool, toHumanReadableSize(totalOverProvCapacity)); CapacityState capacityState = CapacityState.Enabled; if (storagePool.getScope() == ScopeType.ZONE) { DataCenterVO dc = ApiDBUtils.findZoneById(storagePool.getDataCenterId()); @@ -1650,8 +1650,8 @@ public void createCapacityEntry(StoragePoolVO storagePool, short capacityType, l _capacityDao.update(capacity.getId(), capacity); } } - logger.debug("Successfully set Capacity - " + toHumanReadableSize(totalOverProvCapacity) + " for capacity type - " + capacityType + " , DataCenterId - " + storagePool.getDataCenterId() + ", HostOrPoolId - " - + storagePool.getId() + ", PodId " + storagePool.getPodId()); + 
logger.debug("Successfully set Capacity - {} for capacity type - {} , DataCenterId - {}, Pool - {}, PodId {}", + toHumanReadableSize(totalOverProvCapacity), capacityType, storagePool.getDataCenterId(), storagePool, storagePool.getPodId()); } @Override @@ -1679,7 +1679,7 @@ public Pair sendToPool(StoragePool pool, long[] hostIdsToTryFirs hostIds.removeAll(hostIdsToAvoid); } if (hostIds == null || hostIds.isEmpty()) { - throw new StorageUnavailableException("Unable to send command to the pool " + pool.getId() + " due to there is no enabled hosts up in this cluster", pool.getId()); + throw new StorageUnavailableException(String.format("Unable to send command to the pool %s due to there is no enabled hosts up in this cluster", pool), pool.getId()); } for (Long hostId : hostIds) { try { @@ -1690,10 +1690,8 @@ public Pair sendToPool(StoragePool pool, long[] hostIdsToTryFirs answers.add(_agentMgr.send(targetHostId, cmd)); } return new Pair<>(hostId, answers.toArray(new Answer[answers.size()])); - } catch (AgentUnavailableException e) { - logger.debug("Unable to send storage pool command to " + pool + " via " + hostId, e); - } catch (OperationTimedoutException e) { - logger.debug("Unable to send storage pool command to " + pool + " via " + hostId, e); + } catch (AgentUnavailableException | OperationTimedoutException e) { + logger.debug("Unable to send storage pool command to {} via {}", pool::toString, () -> _hostDao.findById(hostId), () -> e); } } @@ -1729,19 +1727,21 @@ public void cleanupStorage(boolean recurring) { try { List unusedTemplatesInPool = _tmpltMgr.getUnusedTemplatesInPool(pool); - logger.debug(String.format("Storage pool garbage collector found [%s] templates to be cleaned up in storage pool [%s].", unusedTemplatesInPool.size(), pool.getName())); + logger.debug("Storage pool garbage collector found [{}] templates to be cleaned up in storage pool [{}].", unusedTemplatesInPool.size(), pool); for (VMTemplateStoragePoolVO templatePoolVO : unusedTemplatesInPool) 
{ if (templatePoolVO.getDownloadState() != VMTemplateStorageResourceAssoc.Status.DOWNLOADED) { - logger.debug(String.format("Storage pool garbage collector is skipping template [%s] clean up on pool [%s] " + - "because it is not completely downloaded.", templatePoolVO.getTemplateId(), templatePoolVO.getPoolId())); + logger.debug("Storage pool garbage collector is skipping " + + "template: {} on pool {} because it is not completely downloaded.", + () -> _templateDao.findById(templatePoolVO.getTemplateId()), () -> _storagePoolDao.findById(templatePoolVO.getPoolId())); continue; } if (!templatePoolVO.getMarkedForGC()) { templatePoolVO.setMarkedForGC(true); _vmTemplatePoolDao.update(templatePoolVO.getId(), templatePoolVO); - logger.debug(String.format("Storage pool garbage collector has marked template [%s] on pool [%s] " + - "for garbage collection.", templatePoolVO.getTemplateId(), templatePoolVO.getPoolId())); + logger.debug("Storage pool garbage collector has marked template [{}] on pool [{}] " + + "for garbage collection.", + () -> _templateDao.findById(templatePoolVO.getTemplateId()), () -> _storagePoolDao.findById(templatePoolVO.getPoolId())); continue; } @@ -1783,9 +1783,9 @@ public void cleanupStorage(boolean recurring) { logger.debug(String.format("Did not find snapshot [%s] in destroying state in %s data store ID: %d.", snapshotUuid, storeRole, snapshotDataStoreVO.getDataStoreId())); } } catch (Exception e) { - logger.error(String.format("Failed to delete snapshot [%s] from storage due to: [%s].", snapshotDataStoreVO.getSnapshotId(), e.getMessage())); + logger.error("Failed to delete snapshot [{}] from storage due to: [{}].", snapshotDataStoreVO, e.getMessage()); if (logger.isDebugEnabled()) { - logger.debug(String.format("Failed to delete snapshot [%s] from storage.", snapshotUuid), e); + logger.debug("Failed to delete snapshot [{}] from storage.", snapshot, e); } } } @@ -1796,8 +1796,8 @@ public void cleanupStorage(boolean recurring) { if 
(Type.ROOT.equals(vol.getVolumeType())) { VMInstanceVO vmInstanceVO = _vmInstanceDao.findById(vol.getInstanceId()); if (vmInstanceVO != null && vmInstanceVO.getState() == State.Destroyed) { - logger.debug(String.format("ROOT volume [%s] will not be expunged because the VM is [%s], therefore this volume will be expunged with the VM" - + " cleanup job.", vol.getUuid(), vmInstanceVO.getState())); + logger.debug("ROOT volume [{}] will not be expunged because the VM is [{}], therefore this volume will be expunged with the VM" + + " cleanup job.", vol, vmInstanceVO.getState()); continue; } } @@ -1810,8 +1810,8 @@ public void cleanupStorage(boolean recurring) { // system, but not necessary. handleManagedStorage(vol); } catch (Exception e) { - logger.error(String.format("Unable to destroy host-side clustered file system [%s] due to: [%s].", vol.getUuid(), e.getMessage())); - logger.debug(String.format("Unable to destroy host-side clustered file system [%s].", vol.getUuid()), e); + logger.error("Unable to destroy host-side clustered file system [{}] due to: [{}].", vol, e.getMessage()); + logger.debug("Unable to destroy host-side clustered file system [{}].", vol, e); } try { @@ -1820,11 +1820,11 @@ public void cleanupStorage(boolean recurring) { volService.ensureVolumeIsExpungeReady(vol.getId()); volService.expungeVolumeAsync(volumeInfo); } else { - logger.debug(String.format("Volume [%s] is already destroyed.", vol.getUuid())); + logger.debug("Volume [{}] is already destroyed.", vol); } } catch (Exception e) { - logger.error(String.format("Unable to destroy volume [%s] due to: [%s].", vol.getUuid(), e.getMessage())); - logger.debug(String.format("Unable to destroy volume [%s].", vol.getUuid()), e); + logger.error("Unable to destroy volume [{}] due to: [{}].", vol, e.getMessage()); + logger.debug("Unable to destroy volume [{}].", vol, e); } } @@ -1838,8 +1838,8 @@ public void cleanupStorage(boolean recurring) { } _snapshotDao.expunge(snapshotVO.getId()); } catch (Exception 
e) { - logger.error(String.format("Unable to destroy snapshot [%s] due to: [%s].", snapshotVO.getUuid(), e.getMessage())); - logger.debug(String.format("Unable to destroy snapshot [%s].", snapshotVO.getUuid()), e); + logger.error("Unable to destroy snapshot [{}] due to: [{}].", snapshotVO, e.getMessage()); + logger.debug("Unable to destroy snapshot [{}].", snapshotVO, e); } } @@ -1855,7 +1855,7 @@ public void cleanupStorage(boolean recurring) { DataStore dataStore = _dataStoreMgr.getDataStore(volumeDataStore.getDataStoreId(), DataStoreRole.Image); EndPoint ep = _epSelector.select(dataStore, volumeDataStore.getExtractUrl()); if (ep == null) { - logger.warn(String.format("There is no secondary storage VM for image store [%s], cannot destroy uploaded volume [%s].", dataStore.getName(), volume.getUuid())); + logger.warn("There is no secondary storage VM for image store {}, cannot destroy uploaded volume {}.", dataStore, volume); continue; } Host host = _hostDao.findById(ep.getId()); @@ -1868,18 +1868,18 @@ public void cleanupStorage(boolean recurring) { // expunge volume from secondary if volume is on image store VolumeInfo volOnSecondary = volFactory.getVolume(volume.getId(), DataStoreRole.Image); if (volOnSecondary != null) { - logger.info(String.format("Expunging volume [%s] uploaded using HTTP POST from secondary data store.", volume.getUuid())); + logger.info("Expunging volume [{}] uploaded using HTTP POST from secondary data store.", volume); AsyncCallFuture future = volService.expungeVolumeAsync(volOnSecondary); VolumeApiResult result = future.get(); if (!result.isSuccess()) { - logger.warn(String.format("Failed to expunge volume [%s] from the image store [%s] due to: [%s].", volume.getUuid(), dataStore.getName(), result.getResult())); + logger.warn("Failed to expunge volume {} from the image store {} due to: {}", volume, dataStore, result.getResult()); } } } } } catch (Throwable th) { - logger.error(String.format("Unable to destroy uploaded volume [%s] due to: 
[%s].", volume.getUuid(), th.getMessage())); - logger.debug(String.format("Unable to destroy uploaded volume [%s].", volume.getUuid()), th); + logger.error("Unable to destroy uploaded volume [{}] due to: [{}].", volume, th.getMessage()); + logger.debug("Unable to destroy uploaded volume [{}].", volume, th); } } @@ -1895,7 +1895,7 @@ public void cleanupStorage(boolean recurring) { DataStore dataStore = _dataStoreMgr.getDataStore(templateDataStore.getDataStoreId(), DataStoreRole.Image); EndPoint ep = _epSelector.select(dataStore, templateDataStore.getExtractUrl()); if (ep == null) { - logger.warn(String.format("Cannot destroy uploaded template [%s] as there is no secondary storage VM for image store [%s].", template.getUuid(), dataStore.getName())); + logger.warn("Cannot destroy uploaded template {} as there is no secondary storage VM for image store {}.", template, dataStore); continue; } Host host = _hostDao.findById(ep.getId()); @@ -1904,7 +1904,7 @@ public void cleanupStorage(boolean recurring) { AsyncCallFuture future = _imageSrv.deleteTemplateAsync(tmplFactory.getTemplate(template.getId(), dataStore)); TemplateApiResult result = future.get(); if (!result.isSuccess()) { - logger.warn(String.format("Failed to delete template [%s] from image store [%s] due to: [%s]", template.getUuid(), dataStore.getName(), result.getResult())); + logger.warn("Failed to delete template {} from the image store {} due to: {}", template, dataStore, result.getResult()); continue; } // remove from template_zone_ref @@ -1928,8 +1928,8 @@ public void cleanupStorage(boolean recurring) { } } } catch (Throwable th) { - logger.error(String.format("Unable to destroy uploaded template [%s] due to: [%s].", template.getUuid(), th.getMessage())); - logger.debug(String.format("Unable to destroy uploaded template [%s].", template.getUuid()), th); + logger.error("Unable to destroy uploaded template [{}] due to: [{}].", template, th.getMessage()); + logger.debug("Unable to destroy uploaded template 
[{}].", template, th); } } cleanupInactiveTemplates(); @@ -2015,7 +2015,7 @@ private void handleManagedStorage(Volume volume) { if (answer != null && answer.getResult()) { volService.revokeAccess(volumeInfo, host, volumeInfo.getDataStore()); } else { - logger.warn("Unable to remove host-side clustered file system for the following volume: " + volume.getUuid()); + logger.warn("Unable to remove host-side clustered file system for the following volume: {}", volume); } } } @@ -2077,7 +2077,7 @@ public void cleanupSecondaryStorage(boolean recurring) { try { long storeId = store.getId(); List destroyedTemplateStoreVOs = _templateStoreDao.listDestroyed(storeId); - logger.debug("Secondary storage garbage collector found " + destroyedTemplateStoreVOs.size() + " templates to cleanup on template_store_ref for store: " + store.getName()); + logger.debug("Secondary storage garbage collector found {} templates to cleanup on template_store_ref for store: {}", destroyedTemplateStoreVOs.size(), store); for (TemplateDataStoreVO destroyedTemplateStoreVO : destroyedTemplateStoreVOs) { if (logger.isDebugEnabled()) { logger.debug("Deleting template store DB entry: " + destroyedTemplateStoreVO); @@ -2085,7 +2085,7 @@ public void cleanupSecondaryStorage(boolean recurring) { _templateStoreDao.remove(destroyedTemplateStoreVO.getId()); } } catch (Exception e) { - logger.warn("problem cleaning up templates in template_store_ref for store: " + store.getName(), e); + logger.warn("problem cleaning up templates in template_store_ref for store: {}", store, e); } } @@ -2093,7 +2093,7 @@ public void cleanupSecondaryStorage(boolean recurring) { for (DataStore store : imageStores) { try { List destroyedSnapshotStoreVOs = _snapshotStoreDao.listDestroyed(store.getId()); - logger.debug("Secondary storage garbage collector found " + destroyedSnapshotStoreVOs.size() + " snapshots to cleanup on snapshot_store_ref for store: " + store.getName()); + logger.debug("Secondary storage garbage collector found {} 
snapshots to cleanup on snapshot_store_ref for store: {}", destroyedSnapshotStoreVOs.size(), store); for (SnapshotDataStoreVO destroyedSnapshotStoreVO : destroyedSnapshotStoreVOs) { // check if this snapshot has child SnapshotInfo snap = snapshotFactory.getSnapshot(destroyedSnapshotStoreVO.getSnapshotId(), store); @@ -2121,7 +2121,7 @@ public void cleanupSecondaryStorage(boolean recurring) { } } catch (Exception e2) { - logger.warn("problem cleaning up snapshots in snapshot_store_ref for store: " + store.getName(), e2); + logger.warn("problem cleaning up snapshots in snapshot_store_ref for store: {}", store, e2); } } @@ -2131,7 +2131,7 @@ public void cleanupSecondaryStorage(boolean recurring) { try { List destroyedStoreVOs = _volumeStoreDao.listDestroyed(store.getId()); destroyedStoreVOs.addAll(_volumeDataStoreDao.listByVolumeState(Volume.State.Expunged)); - logger.debug("Secondary storage garbage collector found " + destroyedStoreVOs.size() + " volumes to cleanup on volume_store_ref for store: " + store.getName()); + logger.debug("Secondary storage garbage collector found {} volumes to cleanup on volume_store_ref for store: {}", destroyedStoreVOs.size(), store); for (VolumeDataStoreVO destroyedStoreVO : destroyedStoreVOs) { if (logger.isDebugEnabled()) { logger.debug("Deleting volume store DB entry: " + destroyedStoreVO); @@ -2140,7 +2140,7 @@ public void cleanupSecondaryStorage(boolean recurring) { } } catch (Exception e2) { - logger.warn("problem cleaning up volumes in volume_store_ref for store: " + store.getName(), e2); + logger.warn("problem cleaning up volumes in volume_store_ref for store: {}", store, e2); } } } catch (Exception e3) { @@ -2175,7 +2175,7 @@ public PrimaryDataStoreInfo preparePrimaryStorageForMaintenance(Long primaryStor } if (!primaryStorage.getStatus().equals(StoragePoolStatus.Up) && !primaryStorage.getStatus().equals(StoragePoolStatus.ErrorInMaintenance)) { - throw new InvalidParameterValueException("Primary storage with id " + 
primaryStorageId + " is not ready for migration, as the status is:" + primaryStorage.getStatus().toString()); + throw new InvalidParameterValueException(String.format("Primary storage %s is not ready for migration, as the status is:%s", primaryStorage, primaryStorage.getStatus().toString())); } DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName()); @@ -2184,7 +2184,7 @@ public PrimaryDataStoreInfo preparePrimaryStorageForMaintenance(Long primaryStor if (primaryStorage.getPoolType() == StoragePoolType.DatastoreCluster) { if (primaryStorage.getStatus() == StoragePoolStatus.PrepareForMaintenance) { - throw new CloudRuntimeException(String.format("There is already a job running for preparation for maintenance of the storage pool %s", primaryStorage.getUuid())); + throw new CloudRuntimeException(String.format("There is already a job running for preparation for maintenance of the storage pool %s", primaryStorage)); } handlePrepareDatastoreClusterMaintenance(lifeCycle, primaryStorageId); } @@ -2216,7 +2216,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { lifeCycle.maintain(childStore); } catch (Exception e) { if (logger.isDebugEnabled()) { - logger.debug(String.format("Exception on maintenance preparation of one of the child datastores in datastore cluster %d with error %s", primaryStorageId, e)); + logger.debug("Exception on maintenance preparation of one of the child datastores in datastore cluster {} with error {}", datastoreCluster, e); } // Set to ErrorInMaintenance state of all child storage pools and datastore cluster for (StoragePoolVO childDatastore : childDatastores) { @@ -2225,7 +2225,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } datastoreCluster.setStatus(StoragePoolStatus.ErrorInMaintenance); _storagePoolDao.update(datastoreCluster.getId(), datastoreCluster); - throw new CloudRuntimeException(String.format("Failed to prepare maintenance mode for 
datastore cluster %d with error %s %s", primaryStorageId, e.getMessage(), e)); + throw new CloudRuntimeException(String.format("Failed to prepare maintenance mode for datastore cluster %s with error %s %s", datastoreCluster, e.getMessage(), e)); } } } @@ -2247,7 +2247,7 @@ public PrimaryDataStoreInfo cancelPrimaryStorageForMaintenance(CancelPrimaryStor } if (primaryStorage.getStatus().equals(StoragePoolStatus.Up) || primaryStorage.getStatus().equals(StoragePoolStatus.PrepareForMaintenance)) { - throw new StorageUnavailableException("Primary storage with id " + primaryStorageId + " is not ready to complete migration, as the status is:" + primaryStorage.getStatus().toString(), + throw new StorageUnavailableException("Primary storage " + primaryStorage + " is not ready to complete migration, as the status is:" + primaryStorage.getStatus().toString(), primaryStorageId); } @@ -2276,7 +2276,7 @@ public StoragePool syncStoragePool(SyncStoragePoolCmd cmd) { StoragePoolVO pool = _storagePoolDao.findById(poolId); if (pool == null) { - String msg = String.format("Unable to obtain lock on the storage pool record while syncing storage pool [%s] with management server", pool.getUuid()); + String msg = String.format("Unable to find the storage pool with id %d record while syncing storage pool with management server", poolId); logger.error(msg); throw new InvalidParameterValueException(msg); } @@ -2286,7 +2286,7 @@ public StoragePool syncStoragePool(SyncStoragePoolCmd cmd) { } if (!pool.getStatus().equals(StoragePoolStatus.Up)) { - throw new InvalidParameterValueException(String.format("Primary storage with id %s is not ready for syncing, as the status is %s", pool.getUuid(), pool.getStatus().toString())); + throw new InvalidParameterValueException(String.format("Primary storage %s is not ready for syncing, as the status is %s", pool, pool.getStatus().toString())); } // find the host @@ -2299,11 +2299,11 @@ public StoragePool syncStoragePool(SyncStoragePoolCmd cmd) { final Answer 
answer = _agentMgr.easySend(hostId, modifyStoragePoolCommand); if (answer == null) { - throw new CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command %s", pool.getUuid())); + throw new CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command %s", pool)); } if (!answer.getResult()) { - throw new CloudRuntimeException(String.format("Unable to process ModifyStoragePoolCommand for pool %s on the host %s due to %s", pool.getUuid(), hostId, answer.getDetails())); + throw new CloudRuntimeException(String.format("Unable to process ModifyStoragePoolCommand for pool %s on the host %s due to %s", pool, _hostDao.findById(hostId), answer.getDetails())); } assert (answer instanceof ModifyStoragePoolAnswer) : "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? Pool=" + @@ -2322,7 +2322,7 @@ public StoragePool syncStoragePool(SyncStoragePoolCmd cmd) { } } else { - throw new CloudRuntimeException(String.format("Unable to sync storage pool [%s] as there no connected hosts to the storage pool", pool.getUuid())); + throw new CloudRuntimeException(String.format("Unable to sync storage pool [%s] as there no connected hosts to the storage pool", pool)); } return (PrimaryDataStoreInfo) _dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); } @@ -2469,8 +2469,8 @@ public void validateChildDatastoresToBeAddedInUpState(StoragePoolVO datastoreClu } } if (dataStoreVO != null && !dataStoreVO.getStatus().equals(StoragePoolStatus.Up)) { - String msg = String.format("Cannot synchronise datastore cluster %s because primary storage with id %s is not in Up state, " + - "current state is %s", datastoreClusterPool.getUuid(), dataStoreVO.getUuid(), dataStoreVO.getStatus().toString()); + String msg = String.format("Cannot synchronise datastore cluster %s because primary storage %s is not in Up state, " + + "current state is %s", datastoreClusterPool, 
dataStoreVO, dataStoreVO.getStatus().toString()); throw new CloudRuntimeException(msg); } } @@ -2542,18 +2542,17 @@ private void handleRemoveChildStoragePoolFromDatastoreCluster(Set childD details.put(DiskTO.PROTOCOL_TYPE, Storage.StoragePoolType.DatastoreCluster.toString()); disk.setDetails(details); - logger.debug(String.format("Attempting to process SyncVolumePathCommand for the volume %d on the host %d with state %s", volumeId, hostId, hostVO.getResourceState())); + logger.debug("Attempting to process SyncVolumePathCommand for the volume {} on the host {} with state {}", volume, hostVO, hostVO.getResourceState()); SyncVolumePathCommand cmd = new SyncVolumePathCommand(disk); final Answer answer = _agentMgr.easySend(hostId, cmd); // validate answer if (answer == null) { - throw new CloudRuntimeException("Unable to get an answer to the SyncVolumePath command for volume " + volumeId); + throw new CloudRuntimeException(String.format("Unable to get an answer to the SyncVolumePath command for volume %s", volume)); } if (!answer.getResult()) { - throw new CloudRuntimeException("Unable to process SyncVolumePathCommand for the volume" + volumeId + " to the host " + hostId + " due to " + answer.getDetails()); + throw new CloudRuntimeException(String.format("Unable to process SyncVolumePathCommand for the volume %s to the host %s due to %s", volume, hostVO, answer.getDetails())); } - assert (answer instanceof SyncVolumePathAnswer) : "Well, now why won't you actually return the SyncVolumePathAnswer when it's SyncVolumePathCommand? volume=" + - volume.getUuid() + "Host=" + hostId; + assert (answer instanceof SyncVolumePathAnswer) : String.format("Well, now why won't you actually return the SyncVolumePathAnswer when it's SyncVolumePathCommand? 
volume=%s Host=%s", volume, hostVO); // check for the changed details of volume and update database VolumeVO volumeVO = volumeDao.findById(volumeId); @@ -2563,7 +2562,7 @@ private void handleRemoveChildStoragePoolFromDatastoreCluster(Set childD if (storagePoolVO != null) { volumeVO.setPoolId(storagePoolVO.getId()); } else { - logger.warn(String.format("Unable to find datastore %s while updating the new datastore of the volume %d", datastoreName, volumeId)); + logger.warn("Unable to find datastore {} while updating the new datastore of the volume {}", datastoreName, volumeVO); } } @@ -2627,7 +2626,7 @@ public void onManagementNodeJoined(List nodeList public void onManagementNodeLeft(List nodeList, long selfNodeId) { for (ManagementServerHost vo : nodeList) { if (vo.getMsid() == _serverId) { - logger.info("Cleaning up storage maintenance jobs associated with Management server: " + vo.getMsid()); + logger.info("Cleaning up storage maintenance jobs associated with Management server: {}", vo); List poolIds = _storagePoolWorkDao.searchForPoolIdsForPendingWorkJobs(vo.getMsid()); if (poolIds.size() > 0) { for (Long poolId : poolIds) { @@ -2840,7 +2839,7 @@ public Host updateSecondaryStorage(long secStorageId, String newUrl) { } if (secHost.getType() != Host.Type.SecondaryStorage) { - throw new InvalidParameterValueException("host: " + secStorageId + " is not a secondary storage"); + throw new InvalidParameterValueException(String.format("host: %s is not a secondary storage", secHost)); } URI uri = null; @@ -2916,13 +2915,11 @@ private boolean checkUsagedSpace(StoragePool pool) { double usedPercentage = ((double)usedSize / (double)totalSize); double storageUsedThreshold = CapacityManager.StorageCapacityDisableThreshold.valueIn(pool.getDataCenterId()); if (logger.isDebugEnabled()) { - logger.debug("Checking pool " + pool.getId() + " for storage, totalSize: " + pool.getCapacityBytes() + ", usedBytes: " + pool.getUsedBytes() + - ", usedPct: " + usedPercentage + ", disable 
threshold: " + storageUsedThreshold); + logger.debug("Checking pool {} for storage, totalSize: {}, usedBytes: {}, usedPct: {}, disable threshold: {}", pool, pool.getCapacityBytes(), pool.getUsedBytes(), usedPercentage, storageUsedThreshold); } if (usedPercentage >= storageUsedThreshold) { if (logger.isDebugEnabled()) { - logger.debug("Insufficient space on pool: " + pool.getId() + " since its usage percentage: " + usedPercentage + - " has crossed the pool.storage.capacity.disablethreshold: " + storageUsedThreshold); + logger.debug("Insufficient space on pool: {} since its usage percentage: {} has crossed the pool.storage.capacity.disablethreshold: {}", pool, usedPercentage, storageUsedThreshold); } return false; } @@ -2952,7 +2949,7 @@ protected boolean checkIfPoolIopsCapacityNull(StoragePool pool) { // Only IOPS-guaranteed primary storage like SolidFire is using/setting IOPS. // This check returns true for storage that does not specify IOPS. if (pool.getCapacityIops() == null) { - logger.info("Storage pool " + pool.getName() + " (" + pool.getId() + ") does not supply IOPS capacity, assuming enough capacity"); + logger.info("Storage pool {} does not supply IOPS capacity, assuming enough capacity", pool); return true; } @@ -3037,7 +3034,7 @@ public boolean storagePoolHasEnoughSpace(List> volumeD // allocated space includes templates if (logger.isDebugEnabled()) { - logger.debug("Destination pool id: " + pool.getId()); + logger.debug("Destination pool: {}", pool); } // allocated space includes templates final StoragePoolVO poolVO = _storagePoolDao.findById(pool.getId()); @@ -3071,7 +3068,7 @@ public boolean storagePoolHasEnoughSpace(List> volumeD } if (logger.isDebugEnabled()) { - logger.debug("Pool ID for the volume with ID " + volumeVO.getId() + " is " + volumeVO.getPoolId()); + logger.debug("Pool ID for the volume {} is {}", volumeVO, volumeVO.getPoolId()); } // A ready-state volume is already allocated in a pool, so the asking size is zero for it. 
@@ -3092,7 +3089,7 @@ public boolean storagePoolHasEnoughSpaceForResize(StoragePool pool, long current return false; } if (logger.isDebugEnabled()) { - logger.debug("Destination pool id: " + pool.getId()); + logger.debug("Destination pool: {}", pool); } long totalAskingSize = newSize - currentSize; @@ -3156,7 +3153,7 @@ public boolean isStoragePoolCompliantWithStoragePolicy(List answer : answers) { if (!answer.second().getResult()) { - logger.debug(String.format("Storage pool %s is not compliance with storage policy for volume %s", pool.getUuid(), answer.first().getName())); + logger.debug("Storage pool {} is not compliance with storage policy for volume {}", pool, answer.first().getName()); return false; } } @@ -3178,29 +3175,32 @@ protected boolean checkPoolforSpace(StoragePool pool, long allocatedSizeWithTemp totalOverProvCapacity = overProvFactor.multiply(new BigDecimal(pool.getCapacityBytes())).longValue(); - logger.debug("Found storage pool " + pool.getName() + " of type " + pool.getPoolType().toString() + " with overprovisioning factor " + overProvFactor.toString()); - logger.debug("Total over provisioned capacity calculated is " + overProvFactor + " * " + toHumanReadableSize(pool.getCapacityBytes())); + logger.debug("Found storage pool {} of type {} with overprovisioning factor {}", pool, pool.getPoolType(), overProvFactor); + logger.debug("Total over provisioned capacity calculated is {} * {}", overProvFactor, toHumanReadableSize(pool.getCapacityBytes())); } else { totalOverProvCapacity = pool.getCapacityBytes(); - logger.debug("Found storage pool " + poolVO.getName() + " of type " + pool.getPoolType().toString()); + logger.debug("Found storage pool {} of type {}", poolVO, pool.getPoolType()); } - logger.debug("Total capacity of the pool " + poolVO.getName() + " with ID " + pool.getId() + " is " + toHumanReadableSize(totalOverProvCapacity)); + logger.debug("Total capacity of the pool {} is {}", poolVO, toHumanReadableSize(totalOverProvCapacity)); double 
storageAllocatedThreshold = CapacityManager.StorageAllocatedCapacityDisableThreshold.valueIn(pool.getDataCenterId()); if (logger.isDebugEnabled()) { - logger.debug("Checking pool: " + pool.getId() + " for storage allocation , maxSize : " + toHumanReadableSize(totalOverProvCapacity) + ", totalAllocatedSize : " + toHumanReadableSize(allocatedSizeWithTemplate) - + ", askingSize : " + toHumanReadableSize(totalAskingSize) + ", allocated disable threshold: " + storageAllocatedThreshold); + logger.debug("Checking pool: {} for storage allocation , maxSize : {}, " + + "totalAllocatedSize : {}, askingSize : {}, allocated disable threshold: {}", + pool, toHumanReadableSize(totalOverProvCapacity), toHumanReadableSize(allocatedSizeWithTemplate), toHumanReadableSize(totalAskingSize), storageAllocatedThreshold); } double usedPercentage = (allocatedSizeWithTemplate + totalAskingSize) / (double)(totalOverProvCapacity); if (usedPercentage > storageAllocatedThreshold) { if (logger.isDebugEnabled()) { - logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for storage allocation since its allocated percentage: " + usedPercentage - + " has crossed the allocated pool.storage.allocated.capacity.disablethreshold: " + storageAllocatedThreshold); + logger.debug("Insufficient un-allocated capacity on: {} for storage " + + "allocation since its allocated percentage: {} has crossed the allocated" + + " pool.storage.allocated.capacity.disablethreshold: {}", + pool, usedPercentage, storageAllocatedThreshold); } if (!forVolumeResize) { return false; @@ -3220,8 +3220,10 @@ protected boolean checkPoolforSpace(StoragePool pool, long allocatedSizeWithTemp if (totalOverProvCapacity < (allocatedSizeWithTemplate + totalAskingSize)) { if (logger.isDebugEnabled()) { - logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for storage allocation, not enough storage, maxSize : " + toHumanReadableSize(totalOverProvCapacity) - + ", totalAllocatedSize : " + 
toHumanReadableSize(allocatedSizeWithTemplate) + ", askingSize : " + toHumanReadableSize(totalAskingSize)); + logger.debug("Insufficient un-allocated capacity on: {} for storage " + + "allocation, not enough storage, maxSize : {}, totalAllocatedSize : {}, " + + "askingSize : {}", pool, toHumanReadableSize(totalOverProvCapacity), + toHumanReadableSize(allocatedSizeWithTemplate), toHumanReadableSize(totalAskingSize)); } return false; @@ -3911,6 +3913,7 @@ public void cleanupDownloadUrls() { for (VolumeDataStoreVO volumeOnImageStore : volumesOnImageStoreList) { long volumeId = volumeOnImageStore.getVolumeId(); + VolumeVO volume = volumeDao.findById(volumeId); try { long downloadUrlCurrentAgeInSecs = DateUtil.getTimeDifference(DateUtil.now(), volumeOnImageStore.getExtractUrlCreated()); if (downloadUrlCurrentAgeInSecs < _downloadUrlExpirationInterval) { // URL hasnt expired yet @@ -3918,7 +3921,7 @@ public void cleanupDownloadUrls() { continue; } expiredVolumeIds.add(volumeId); - logger.debug("Removing download url " + volumeOnImageStore.getExtractUrl() + " for volume id " + volumeId); + logger.debug("Removing download url {} for volume {}", volumeOnImageStore.getExtractUrl(), volume); // Remove it from image store ImageStoreEntity secStore = (ImageStoreEntity)_dataStoreMgr.getDataStore(volumeOnImageStore.getDataStoreId(), DataStoreRole.Image); @@ -3927,7 +3930,7 @@ public void cleanupDownloadUrls() { // Now expunge it from DB since this entry was created only for download purpose _volumeStoreDao.expunge(volumeOnImageStore.getId()); } catch (Throwable th) { - logger.warn("Caught exception while deleting download url " + volumeOnImageStore.getExtractUrl() + " for volume id " + volumeOnImageStore.getVolumeId(), th); + logger.warn("Caught exception while deleting download url {} for volume {}", volumeOnImageStore.getExtractUrl(), volume, th); } } for (Long volumeId : expiredVolumeIds) { @@ -3943,14 +3946,14 @@ public void cleanupDownloadUrls() { // Cleanup expired template 
URLs List templatesOnImageStoreList = _templateStoreDao.listTemplateDownloadUrls(); for (TemplateDataStoreVO templateOnImageStore : templatesOnImageStoreList) { - + VMTemplateVO template = _templateDao.findById(templateOnImageStore.getTemplateId()); try { long downloadUrlCurrentAgeInSecs = DateUtil.getTimeDifference(DateUtil.now(), templateOnImageStore.getExtractUrlCreated()); if (downloadUrlCurrentAgeInSecs < _downloadUrlExpirationInterval) { // URL hasnt expired yet continue; } - logger.debug("Removing download url " + templateOnImageStore.getExtractUrl() + " for template id " + templateOnImageStore.getTemplateId()); + logger.debug("Removing download url {} for template {}", templateOnImageStore.getExtractUrl(), template); // Remove it from image store ImageStoreEntity secStore = (ImageStoreEntity)_dataStoreMgr.getDataStore(templateOnImageStore.getDataStoreId(), DataStoreRole.Image); @@ -3961,7 +3964,7 @@ public void cleanupDownloadUrls() { templateOnImageStore.setExtractUrlCreated(null); _templateStoreDao.update(templateOnImageStore.getId(), templateOnImageStore); } catch (Throwable th) { - logger.warn("caught exception while deleting download url " + templateOnImageStore.getExtractUrl() + " for template id " + templateOnImageStore.getTemplateId(), th); + logger.warn("caught exception while deleting download url {} for template {}", templateOnImageStore.getExtractUrl(), template, th); } } @@ -3973,7 +3976,7 @@ public void cleanupDownloadUrls() { secStore.deleteExtractUrl(imageStoreObjectDownloadVO.getPath(), imageStoreObjectDownloadVO.getDownloadUrl(), null); _imageStoreObjectDownloadDao.expunge(imageStoreObjectDownloadVO.getId()); } catch (Throwable th) { - logger.warn("caught exception while deleting download url " + imageStoreObjectDownloadVO.getDownloadUrl() + " for object id " + imageStoreObjectDownloadVO.getId(), th); + logger.warn("caught exception while deleting download url {} for object {}", imageStoreObjectDownloadVO.getDownloadUrl(), 
imageStoreObjectDownloadVO, th); } } } @@ -4189,7 +4192,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { _objectStoreDao.remove(storeId); } }); - logger.debug("Successfully deleted object store with Id: "+storeId); + logger.debug("Successfully deleted object store: {}", store); return true; } @@ -4230,7 +4233,7 @@ public ObjectStore updateObjectStore(Long id, UpdateObjectStoragePoolCmd cmd) { objectStoreVO.setName(cmd.getName()); } _objectStoreDao.update(id, objectStoreVO); - logger.debug("Successfully updated object store with Id: "+id); + logger.debug("Successfully updated object store: {}", objectStoreVO); return objectStoreVO; } } diff --git a/server/src/main/java/com/cloud/storage/download/DownloadListener.java b/server/src/main/java/com/cloud/storage/download/DownloadListener.java index 6bb0dec26d8a..3c032306aa20 100644 --- a/server/src/main/java/com/cloud/storage/download/DownloadListener.java +++ b/server/src/main/java/com/cloud/storage/download/DownloadListener.java @@ -203,13 +203,13 @@ public void setDisconnected() { } public void logDisconnect() { - logger.warn("Unable to monitor download progress of {} : uuid: {}({}) at host {}", - object.getType(), object.getId(), object, _ssAgent.getId()); + logger.warn("Unable to monitor download progress of {} : uuid: {}({}) at host [id: {}, uuid: {}]", + object.getType(), object.getId(), object, _ssAgent.getId(), _ssAgent.getUuid()); } public void log(String message, Level level) { - logger.log(level, "{}, {}: {}({}) at host {}", - message, object.getType(), object.getId(), object, _ssAgent.getId()); + logger.log(level, "{}, {}: {}({}) at host [id: {}, uuid: {}]", + message, object.getType(), object.getId(), object, _ssAgent.getId(), _ssAgent.getUuid()); } public DownloadListener(DownloadMonitorImpl monitor) { diff --git a/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java b/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java index 
62d4de8761dc..4d8894936cfd 100644 --- a/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java +++ b/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java @@ -133,7 +133,7 @@ public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) logger.debug("Host {} connected, connecting host to shared pool {} and sending storage pool information ...", host, pool); } try { - _storageManager.connectHostToSharedPool(hostId, pool.getId()); + _storageManager.connectHostToSharedPool(host, pool.getId()); _storageManager.createCapacityEntry(pool.getId()); } catch (Exception e) { throw new ConnectionException(true, String.format("Unable to connect host %s to storage pool %s due to %s", host, pool, e.toString()), e); @@ -145,9 +145,14 @@ public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) @Override public synchronized boolean processDisconnect(long agentId, Status state) { + return processDisconnect(agentId, null, null, state); + } + + @Override + public synchronized boolean processDisconnect(long agentId, String uuid, String name, Status state) { Host host = _storageManager.getHost(agentId); if (host == null) { - logger.warn("Agent: " + agentId + " not found, not disconnecting pools"); + logger.warn("Agent [id: {}, uuid: {}, name: {}] not found, not disconnecting pools", agentId, uuid, name); return false; } diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 04b64dd80a36..12e9490a0d4d 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -2097,7 +2097,7 @@ private boolean upgradeRunningVirtualMachine(Long vmId, Long newServiceOfferingI // #1 Check existing host has capacity & and the correct tags if (!excludes.shouldAvoid(ApiDBUtils.findHostById(vmInstance.getHostId()))) { existingHostHasCapacity = 
_capacityMgr.checkIfHostHasCpuCapability(vmInstance.getHostId(), newCpu, newSpeed) - && _capacityMgr.checkIfHostHasCapacity(vmInstance.getHostId(), cpuDiff, ByteScaleUtils.mebibytesToBytes(memoryDiff), false, + && _capacityMgr.checkIfHostHasCapacity(host, cpuDiff, ByteScaleUtils.mebibytesToBytes(memoryDiff), false, _capacityMgr.getClusterOverProvisioningFactor(host.getClusterId(), Capacity.CAPACITY_TYPE_CPU), _capacityMgr.getClusterOverProvisioningFactor(host.getClusterId(), Capacity.CAPACITY_TYPE_MEMORY), false) && checkEnforceStrictHostTagCheck(vmInstance, host); diff --git a/server/src/test/java/com/cloud/capacity/CapacityManagerTest.java b/server/src/test/java/com/cloud/capacity/CapacityManagerTest.java index 2c7a2a7fb528..c9f0993213c0 100644 --- a/server/src/test/java/com/cloud/capacity/CapacityManagerTest.java +++ b/server/src/test/java/com/cloud/capacity/CapacityManagerTest.java @@ -20,6 +20,7 @@ import com.cloud.capacity.dao.CapacityDao; import com.cloud.dc.ClusterDetailsDao; import com.cloud.dc.ClusterDetailsVO; +import com.cloud.host.Host; import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.vm.VirtualMachine; @@ -37,6 +38,7 @@ public class CapacityManagerTest { ServiceOfferingDao SOfferingDao = mock(ServiceOfferingDao.class); ClusterDetailsDao ClusterDetailsDao = mock(com.cloud.dc.ClusterDetailsDao.class); CapacityManagerImpl capMgr; + private Host host = mock(Host.class); private ServiceOfferingVO svo = mock(ServiceOfferingVO.class); private CapacityVO cvoCpu = mock(CapacityVO.class); private CapacityVO cvoRam = mock(CapacityVO.class); @@ -71,7 +73,7 @@ public void allocateCapacityTest() { when(clusterDetailRam.getValue()).thenReturn("1.5"); when(clusterDetailCpu.getValue()).thenReturn("2"); when(CDao.update(anyLong(), isA(CapacityVO.class))).thenReturn(true); - boolean hasCapacity = capMgr.checkIfHostHasCapacity(1l, 500, 1024 * 1024 * 1024, false, 2, 2, false); + boolean hasCapacity = 
capMgr.checkIfHostHasCapacity(host, 500, 1024 * 1024 * 1024, false, 2, 2, false); Assert.assertTrue(hasCapacity); } diff --git a/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java b/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java index 482d17908f43..58bc85097684 100644 --- a/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java +++ b/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java @@ -891,7 +891,7 @@ private DeploymentClusterPlanner setupMocksForPlanDeploymentHostTests(HostVO hos Mockito.when(capacityMgr.checkIfHostReachMaxGuestLimit(host)).thenReturn(false); Mockito.when(capacityMgr.checkIfHostHasCpuCapability(ArgumentMatchers.anyLong(), ArgumentMatchers.anyInt(), ArgumentMatchers.anyInt())).thenReturn(true); Mockito.when(capacityMgr.checkIfHostHasCapacity( - ArgumentMatchers.anyLong(), + ArgumentMatchers.any(), ArgumentMatchers.anyInt(), ArgumentMatchers.anyLong(), ArgumentMatchers.anyBoolean(), @@ -902,7 +902,7 @@ private DeploymentClusterPlanner setupMocksForPlanDeploymentHostTests(HostVO hos Mockito.when(serviceOfferingDetailsDao.findDetail(vmProfile.getServiceOfferingId(), GPU.Keys.vgpuType.toString())).thenReturn(null); Mockito.doReturn(true).when(_dpm).checkVmProfileAndHost(vmProfile, host); - Mockito.doReturn(true).when(_dpm).checkIfHostFitsPlannerUsage(ArgumentMatchers.anyLong(), ArgumentMatchers.nullable(PlannerResourceUsage.class)); + Mockito.doReturn(true).when(_dpm).checkIfHostFitsPlannerUsage(ArgumentMatchers.any(Host.class), ArgumentMatchers.nullable(PlannerResourceUsage.class)); Mockito.when(clusterDetailsDao.findDetail(ArgumentMatchers.anyLong(), ArgumentMatchers.anyString())).thenReturn(new ClusterDetailsVO(clusterId, "mock", "1")); DeploymentClusterPlanner planner = Mockito.spy(new FirstFitPlanner()); diff --git a/server/src/test/java/com/cloud/storage/listener/StoragePoolMonitorTest.java 
b/server/src/test/java/com/cloud/storage/listener/StoragePoolMonitorTest.java index 3be6e02d04b1..c700188a5999 100644 --- a/server/src/test/java/com/cloud/storage/listener/StoragePoolMonitorTest.java +++ b/server/src/test/java/com/cloud/storage/listener/StoragePoolMonitorTest.java @@ -64,11 +64,11 @@ public void testProcessConnectStoragePoolNormal() throws Exception { Mockito.when(poolDao.listBy(nullable(Long.class), nullable(Long.class), nullable(Long.class), Mockito.any(ScopeType.class))).thenReturn(Collections.singletonList(pool)); Mockito.when(poolDao.findZoneWideStoragePoolsByTags(Mockito.anyLong(), Mockito.any(String[].class), Mockito.anyBoolean())).thenReturn(Collections.emptyList()); Mockito.when(poolDao.findZoneWideStoragePoolsByHypervisor(Mockito.anyLong(), Mockito.any(Hypervisor.HypervisorType.class))).thenReturn(Collections.emptyList()); - Mockito.doReturn(true).when(storageManager).connectHostToSharedPool(host.getId(), pool.getId()); + Mockito.doReturn(true).when(storageManager).connectHostToSharedPool(host, pool.getId()); storagePoolMonitor.processConnect(host, cmd, false); - Mockito.verify(storageManager, Mockito.times(1)).connectHostToSharedPool(Mockito.eq(host.getId()), Mockito.eq(pool.getId())); + Mockito.verify(storageManager, Mockito.times(1)).connectHostToSharedPool(Mockito.eq(host), Mockito.eq(pool.getId())); Mockito.verify(storageManager, Mockito.times(1)).createCapacityEntry(Mockito.eq(pool.getId())); } @@ -77,7 +77,7 @@ public void testProcessConnectStoragePoolFailureOnHost() throws Exception { Mockito.when(poolDao.listBy(Mockito.anyLong(), Mockito.anyLong(), Mockito.anyLong(), Mockito.any(ScopeType.class))).thenReturn(Collections.singletonList(pool)); Mockito.when(poolDao.findZoneWideStoragePoolsByTags(Mockito.anyLong(), Mockito.any(String[].class), Mockito.anyBoolean())).thenReturn(Collections.emptyList()); Mockito.when(poolDao.findZoneWideStoragePoolsByHypervisor(Mockito.anyLong(), 
Mockito.any(Hypervisor.HypervisorType.class))).thenReturn(Collections.emptyList()); - Mockito.doThrow(new StorageUnavailableException("unable to mount storage", 123L)).when(storageManager).connectHostToSharedPool(Mockito.anyLong(), Mockito.anyLong()); + Mockito.doThrow(new StorageUnavailableException("unable to mount storage", 123L)).when(storageManager).connectHostToSharedPool(Mockito.any(), Mockito.anyLong()); storagePoolMonitor.processConnect(host, cmd, false); } diff --git a/server/src/test/java/com/cloud/vm/UserVmManagerTest.java b/server/src/test/java/com/cloud/vm/UserVmManagerTest.java index 8be100d45708..c3255e064499 100644 --- a/server/src/test/java/com/cloud/vm/UserVmManagerTest.java +++ b/server/src/test/java/com/cloud/vm/UserVmManagerTest.java @@ -419,7 +419,7 @@ public void testScaleVMF4() throws Exception { doReturn(VirtualMachine.State.Running).when(_vmInstance).getState(); //when(ApiDBUtils.getCpuOverprovisioningFactor()).thenReturn(3f); - when(_capacityMgr.checkIfHostHasCapacity(anyLong(), anyInt(), anyLong(), anyBoolean(), anyFloat(), anyFloat(), anyBoolean())).thenReturn(false); + when(_capacityMgr.checkIfHostHasCapacity(any(), anyInt(), anyLong(), anyBoolean(), anyFloat(), anyFloat(), anyBoolean())).thenReturn(false); when(_itMgr.reConfigureVm(_vmInstance.getUuid(), so2, so1, new HashMap(), false)).thenReturn(_vmInstance); doReturn(true).when(_itMgr).upgradeVmDb(anyLong(), so1, so2); From 2bf838bcbca9581d56bc7693fb97ed5d73881a25 Mon Sep 17 00:00:00 2001 From: Vishesh Date: Tue, 3 Dec 2024 16:55:58 +0530 Subject: [PATCH 10/22] fixup --- .../src/main/java/com/cloud/agent/manager/AgentManagerImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java index caefc81848e6..0c113c90e6ac 100644 --- 
a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java @@ -1198,7 +1198,7 @@ public SimulateStartTask(final long id, String uuid, String name, final ServerRe @Override protected void runInContext() { try { - logger.debug("Simulating start for resource {} (id: {}, uuid: {}, name; {})", resource.getName(), id, uuid, name); + logger.debug("Simulating start for resource {} (id: {}, uuid: {}, name {})", resource.getName(), id, uuid, name); if (tapLoadingAgents(id, TapAgentsAction.Add)) { try { From 20dbdaa05033ede4fb36172271908dfdae3dc3da Mon Sep 17 00:00:00 2001 From: Vishesh Date: Tue, 12 Nov 2024 15:14:33 +0530 Subject: [PATCH 11/22] Improve logging to include more identifiable information for engine --- .../main/java/com/cloud/vm/NicProfile.java | 5 +- .../storage/to/TemplateObjectTO.java | 2 +- .../cloudstack/storage/to/VolumeObjectTO.java | 2 +- .../subsystem/api/storage/TemplateInfo.java | 1 + .../com/cloud/vm/VmWorkJobHandlerProxy.java | 6 +- .../cloud/vm/VmWorkJobWakeupDispatcher.java | 11 +- .../api/DataCenterResourceManagerImpl.java | 2 +- .../api/db/dao/EngineClusterDaoImpl.java | 2 +- .../api/db/dao/EngineDataCenterDaoImpl.java | 2 +- .../entity/api/db/dao/EngineHostDaoImpl.java | 2 +- .../api/db/dao/EngineHostPodDaoImpl.java | 2 +- .../orchestration/StorageOrchestrator.java | 11 +- .../com/cloud/user/dao/AccountDaoImpl.java | 2 +- .../com/cloud/vm/dao/VMInstanceDaoImpl.java | 18 +- .../vm/snapshot/dao/VMSnapshotDaoImpl.java | 2 +- ...vmNonManagedStorageDataMotionStrategy.java | 12 +- .../image/TemplateDataFactoryImpl.java | 10 +- .../storage/image/TemplateServiceImpl.java | 72 ++++---- .../storage/image/store/TemplateObject.java | 1 + .../snapshot/DefaultSnapshotStrategy.java | 14 +- .../snapshot/SnapshotDataFactoryImpl.java | 2 +- .../storage/snapshot/SnapshotObject.java | 8 +- .../storage/snapshot/SnapshotServiceImpl.java | 56 +++--- 
.../endpoint/DefaultEndPointSelector.java | 2 +- .../storage/helper/VMSnapshotHelperImpl.java | 6 +- .../image/BaseImageStoreDriverImpl.java | 23 +-- .../datastore/PrimaryDataStoreHelper.java | 9 +- .../storage/volume/VolumeServiceImpl.java | 173 +++++++++--------- .../framework/jobs/impl/AsyncJobVO.java | 1 + 29 files changed, 227 insertions(+), 232 deletions(-) diff --git a/api/src/main/java/com/cloud/vm/NicProfile.java b/api/src/main/java/com/cloud/vm/NicProfile.java index 183c8dcb2d59..a0c80ceb1bfb 100644 --- a/api/src/main/java/com/cloud/vm/NicProfile.java +++ b/api/src/main/java/com/cloud/vm/NicProfile.java @@ -450,6 +450,9 @@ public void deallocate() { @Override public String toString() { - return String.format("NicProfile %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "vmId", "deviceId", "broadcastUri", "reservationId", "iPv4Address")); + return String.format("NicProfile %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "vmId", "deviceId", + "broadcastUri", "reservationId", "iPv4Address")); } } diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java index eafe8f83269c..6b98baf15d40 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java @@ -264,6 +264,6 @@ public void setDeployAsIsConfiguration(String deployAsIsConfiguration) { @Override public String toString() { - return new StringBuilder("TemplateTO[id=").append(id).append("|origUrl=").append(origUrl).append("|name").append(name).append("]").toString(); + return String.format("TemplateTO[id=%d|uuid=%s|origUrl=%s|name%s]", id, uuid, origUrl, name); } } diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java index 
6514038ac623..b84f0204f9bd 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java @@ -258,7 +258,7 @@ public void setPoolId(Long poolId){ @Override public String toString() { - return new StringBuilder("volumeTO[uuid=").append(uuid).append("|path=").append(path).append("|datastore=").append(dataStore).append("]").toString(); + return String.format("volumeTO[id=%s|uuid=%s|name=%s|path=%s|datastore=%s]", id, uuid, name, path, dataStore); } public void setBytesReadRate(Long bytesReadRate) { diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java index 3bd3100e84ed..a0b62ebce54a 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java @@ -22,6 +22,7 @@ import com.cloud.user.UserData; public interface TemplateInfo extends DownloadableDataInfo, VirtualMachineTemplate { + VirtualMachineTemplate getImage(); @Override String getUniqueName(); diff --git a/engine/components-api/src/main/java/com/cloud/vm/VmWorkJobHandlerProxy.java b/engine/components-api/src/main/java/com/cloud/vm/VmWorkJobHandlerProxy.java index c82edc70ded2..37683aa3758e 100644 --- a/engine/components-api/src/main/java/com/cloud/vm/VmWorkJobHandlerProxy.java +++ b/engine/components-api/src/main/java/com/cloud/vm/VmWorkJobHandlerProxy.java @@ -44,10 +44,8 @@ public class VmWorkJobHandlerProxy implements VmWorkJobHandler { private Object _target; private Map, Method> _handlerMethodMap = new HashMap, Method>(); - private Gson _gsonLogger; public VmWorkJobHandlerProxy(Object target) { - _gsonLogger = GsonHelper.getGsonLogger(); buildLookupMap(target.getClass()); _target = target; @@ -123,10 +121,10 @@ public Pair 
handleVmWorkJob(VmWork work) throws Exceptio throw e; } } else { - logger.error("Unable to find handler for VM work job: " + work.getClass().getName() + _gsonLogger.toJson(work)); + logger.error("Unable to find handler for VM work job: {} {}", work.getClass().getName(), work); RuntimeException ex = new RuntimeException("Unable to find handler for VM work job: " + work.getClass().getName()); - return new Pair(JobInfo.Status.FAILED, JobSerializerHelper.toObjectSerializedString(ex)); + return new Pair<>(JobInfo.Status.FAILED, JobSerializerHelper.toObjectSerializedString(ex)); } } } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VmWorkJobWakeupDispatcher.java b/engine/orchestration/src/main/java/com/cloud/vm/VmWorkJobWakeupDispatcher.java index b7c82ce5c21e..1b050ffd9de6 100644 --- a/engine/orchestration/src/main/java/com/cloud/vm/VmWorkJobWakeupDispatcher.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VmWorkJobWakeupDispatcher.java @@ -67,8 +67,8 @@ public void runJob(AsyncJob job) { try { List joinRecords = _joinMapDao.listJoinRecords(job.getId()); if (joinRecords.size() != 1) { - logger.warn("AsyncJob-" + job.getId() - + " received wakeup call with un-supported joining job number: " + joinRecords.size()); + logger.warn("AsyncJob-{} ({}) received wakeup call with un-supported " + + "joining job number: {}", job.getId(), job, joinRecords.size()); // if we fail wakeup-execution for any reason, avoid release sync-source if there is any job.setSyncSource(null); @@ -82,7 +82,7 @@ public void runJob(AsyncJob job) { try { workClz = Class.forName(job.getCmd()); } catch (ClassNotFoundException e) { - logger.error("VM work class " + job.getCmd() + " is not found", e); + logger.error("VM work class {} for job {} is not found", job.getCmd(), job, e); return; } @@ -103,14 +103,13 @@ public void runJob(AsyncJob job) { handler.invoke(_vmMgr); } else { assert (false); - logger.error("Unable to find wakeup handler " + joinRecord.getWakeupHandler() + - " when 
waking up job-" + job.getId()); + logger.error("Unable to find wakeup handler {} when waking up job-{} ({})", joinRecord.getWakeupHandler(), job.getId(), job); } } finally { CallContext.unregister(); } } catch (Throwable e) { - logger.warn("Unexpected exception in waking up job-" + job.getId()); + logger.warn("Unexpected exception in waking up job-{} ({})", job.getId(), job); // if we fail wakeup-execution for any reason, avoid release sync-source if there is any job.setSyncSource(null); diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/DataCenterResourceManagerImpl.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/DataCenterResourceManagerImpl.java index 41366f73a01c..e48481324df6 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/DataCenterResourceManagerImpl.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/DataCenterResourceManagerImpl.java @@ -96,7 +96,7 @@ public EngineHostPodVO loadPod(String uuid) { public EngineClusterVO loadCluster(String uuid) { EngineClusterVO cluster = _clusterDao.findByUuid(uuid); if (cluster == null) { - throw new InvalidParameterValueException("Pod does not exist"); + throw new InvalidParameterValueException("Cluster does not exist"); } return cluster; } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineClusterDaoImpl.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineClusterDaoImpl.java index cc33f9eb3355..fa8b782f662e 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineClusterDaoImpl.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineClusterDaoImpl.java @@ -297,7 +297,7 @@ public boolean updateState(State 
currentState, Event event, State nextState, Dat .append("; updatedTime=") .append(oldUpdatedTime); } else { - logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore"); + logger.debug("Unable to update dataCenter {} with id={}, as there is no such dataCenter exists in the database anymore", vo, vo.getId()); } } return rows > 0; diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineDataCenterDaoImpl.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineDataCenterDaoImpl.java index 03b4bd9eaaf4..96dfdc00d676 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineDataCenterDaoImpl.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineDataCenterDaoImpl.java @@ -300,7 +300,7 @@ public boolean updateState(State currentState, Event event, State nextState, Dat .append("; updatedTime=") .append(oldUpdatedTime); } else { - logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore"); + logger.debug("Unable to update dataCenter {} with id {}, as there is no such dataCenter exists in the database anymore", vo, vo.getId()); } } return rows > 0; diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostDaoImpl.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostDaoImpl.java index 2099ebadb9f7..2ad8d15d0b71 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostDaoImpl.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostDaoImpl.java @@ -451,7 +451,7 @@ public boolean updateState(State 
currentState, DataCenterResourceEntity.State.Ev .append("; updatedTime=") .append(oldUpdatedTime); } else { - logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore"); + logger.debug("Unable to update dataCenter: {}, as there is no such dataCenter exists in the database anymore", vo); } } return rows > 0; diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostPodDaoImpl.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostPodDaoImpl.java index 535e396a376c..58bbfcfc1a1b 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostPodDaoImpl.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostPodDaoImpl.java @@ -183,7 +183,7 @@ public boolean updateState(State currentState, Event event, State nextState, Dat .append("; updatedTime=") .append(oldUpdatedTime); } else { - logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore"); + logger.debug("Unable to update dataCenter: {}, as there is no such dataCenter exists in the database anymore", vo); } } return rows > 0; diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java index ec5d5efb5cfb..830daef0580d 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java @@ -151,7 +151,7 @@ public MigrationResponse migrateData(Long srcDataStoreId, List destDatasto files = migrationHelper.getSortedValidSourcesList(srcDatastore, 
snapshotChains, childTemplates); if (files.isEmpty()) { - return new MigrationResponse(String.format("No files in Image store: %s to migrate", srcDatastore.getId()), migrationPolicy.toString(), true); + return new MigrationResponse(String.format("No files in Image store: %s to migrate", srcDatastore), migrationPolicy.toString(), true); } Map> storageCapacities = new Hashtable<>(); for (Long storeId : destDatastores) { @@ -159,7 +159,7 @@ public MigrationResponse migrateData(Long srcDataStoreId, List destDatasto } storageCapacities.put(srcDataStoreId, new Pair<>(null, null)); if (migrationPolicy == MigrationPolicy.COMPLETE) { - logger.debug("Setting source image store: {} to read-only", srcDatastore.getId()); + logger.debug("Setting source image store: {} to read-only", srcDatastore); storageService.updateImageStoreStatus(srcDataStoreId, true); } @@ -309,8 +309,9 @@ protected Pair migrateCompleted(Long destDatastoreId, DataStore message += "Image stores have been attempted to be balanced"; success = true; } else { - message = "Files not completely migrated from "+ srcDatastore.getId() + ". Datastore (source): " + srcDatastore.getId() + "has equal or more free space than destination."+ - " If you want to continue using the Image Store, please change the read-only status using 'update imagestore' command"; + message = String.format("Files not completely migrated from %s. Datastore (source): %s " + + "has equal or more free space than destination. 
If you want to continue using the Image Store, " + + "please change the read-only status using 'update imagestore' command", srcDatastore, srcDatastore); success = false; } } else { @@ -353,7 +354,7 @@ protected Map> migrateAway( task.setTemplateChain(templateChains); } futures.add((executor.submit(task))); - logger.debug(String.format("Migration of {}: {} is initiated.", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid())); + logger.debug("Migration of {}: {} is initiated.", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid()); return storageCapacities; } diff --git a/engine/schema/src/main/java/com/cloud/user/dao/AccountDaoImpl.java b/engine/schema/src/main/java/com/cloud/user/dao/AccountDaoImpl.java index f9ef5c40eba2..2654b22374f4 100644 --- a/engine/schema/src/main/java/com/cloud/user/dao/AccountDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/user/dao/AccountDaoImpl.java @@ -310,7 +310,7 @@ public void markForCleanup(long accountId) { if (!account.getNeedsCleanup()) { account.setNeedsCleanup(true); if (!update(accountId, account)) { - logger.warn("Failed to mark account id=" + accountId + " for cleanup"); + logger.warn("Failed to mark account {} for cleanup", account); } } } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java index 744518ba7433..0e87e6bcb7db 100755 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java @@ -109,7 +109,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem @Inject VolumeDao volumeDao; @Inject - HostDao hostDao; + protected HostDao hostDao; protected Attribute _updateTimeAttr; @@ -140,8 +140,6 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem private static final String COUNT_VMS_BY_ZONE_AND_STATE_AND_HOST_TAG = "SELECT COUNT(1) FROM vm_instance vi JOIN 
service_offering so ON vi.service_offering_id=so.id " + "JOIN vm_template vt ON vi.vm_template_id = vt.id WHERE vi.data_center_id = ? AND vi.state = ? AND vi.removed IS NULL AND (so.host_tag = ? OR vt.template_tag = ?)"; - @Inject - protected HostDao _hostDao; public VMInstanceDaoImpl() { } @@ -155,13 +153,13 @@ protected void init() { IdStatesSearch.done(); VMClusterSearch = createSearchBuilder(); - SearchBuilder hostSearch = _hostDao.createSearchBuilder(); + SearchBuilder hostSearch = hostDao.createSearchBuilder(); VMClusterSearch.join("hostSearch", hostSearch, hostSearch.entity().getId(), VMClusterSearch.entity().getHostId(), JoinType.INNER); hostSearch.and("clusterId", hostSearch.entity().getClusterId(), SearchCriteria.Op.EQ); VMClusterSearch.done(); LHVMClusterSearch = createSearchBuilder(); - SearchBuilder hostSearch1 = _hostDao.createSearchBuilder(); + SearchBuilder hostSearch1 = hostDao.createSearchBuilder(); LHVMClusterSearch.join("hostSearch1", hostSearch1, hostSearch1.entity().getId(), LHVMClusterSearch.entity().getLastHostId(), JoinType.INNER); LHVMClusterSearch.and("hostid", LHVMClusterSearch.entity().getHostId(), Op.NULL); hostSearch1.and("clusterId", hostSearch1.entity().getClusterId(), SearchCriteria.Op.EQ); @@ -577,13 +575,13 @@ public boolean updateState(State oldState, Event event, State newState, VirtualM logger.debug(str.toString()); } else { - logger.debug("Unable to update the vm id=" + vm.getId() + "; the vm either doesn't exist or already removed"); + logger.debug("Unable to update the vm {}; the vm either doesn't exist or already removed", vm); } } if (vo != null && vo.getState() == newState) { // allow for concurrent update if target state has already been matched - logger.debug("VM " + vo.getInstanceName() + " state has been already been updated to " + newState); + logger.debug("VM {} state has been already been updated to {}", vo, newState); return true; } } @@ -954,8 +952,10 @@ private boolean isPowerStateInSyncWithInstanceState(final 
VirtualMachine.PowerSt State instanceState = instance.getState(); if ((powerState == VirtualMachine.PowerState.PowerOff && instanceState == State.Running) || (powerState == VirtualMachine.PowerState.PowerOn && instanceState == State.Stopped)) { - logger.debug(String.format("VM id: %d on host id: %d and power host id: %d is in %s state, but power state is %s", - instance.getId(), instance.getHostId(), powerHostId, instanceState, powerState)); + HostVO instanceHost = hostDao.findById(instance.getHostId()); + HostVO powerHost = powerHostId == instance.getHostId() ? instanceHost : hostDao.findById(powerHostId); + logger.debug("VM: {} on host: {} and power host : {} is in {} state, but power state is {}", + instance, instanceHost, powerHost, instanceState, powerState); return false; } return true; diff --git a/engine/schema/src/main/java/com/cloud/vm/snapshot/dao/VMSnapshotDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/snapshot/dao/VMSnapshotDaoImpl.java index ab8f5f2cd849..03a978f85469 100644 --- a/engine/schema/src/main/java/com/cloud/vm/snapshot/dao/VMSnapshotDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/snapshot/dao/VMSnapshotDaoImpl.java @@ -176,7 +176,7 @@ public boolean updateState(State currentState, Event event, State nextState, VMS .append("; updatedTime=") .append(oldUpdatedTime); } else { - logger.debug("Unable to update VM snapshot: id=" + vo.getId() + ", as there is no such snapshot exists in the database anymore"); + logger.debug("Unable to update VM snapshot: {}, as there is no such snapshot exists in the database anymore", vo); } } return rows > 0; diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java index bf8fa43fe6c2..1212bc66fd74 100644 --- 
a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java @@ -210,7 +210,7 @@ protected void copyTemplateToTargetFilesystemStorageIfNeeded(VolumeInfo srcVolum TemplateInfo directDownloadTemplateInfo = templateDataFactory.getReadyBypassedTemplateOnPrimaryStore(srcVolumeInfo.getTemplateId(), destDataStore.getId(), destHost.getId()); if (directDownloadTemplateInfo != null) { - logger.debug(String.format("Template %s was of direct download type and successfully staged to primary store %s", directDownloadTemplateInfo.getId(), directDownloadTemplateInfo.getDataStore().getId())); + logger.debug("Template {} was of direct download type and successfully staged to primary store {}", directDownloadTemplateInfo.getImage(), directDownloadTemplateInfo.getDataStore()); return; } @@ -221,8 +221,8 @@ protected void copyTemplateToTargetFilesystemStorageIfNeeded(VolumeInfo srcVolum TemplateInfo sourceTemplateInfo = templateDataFactory.getTemplate(srcVolumeInfo.getTemplateId(), sourceTemplateDataStore); TemplateObjectTO sourceTemplate = new TemplateObjectTO(sourceTemplateInfo); - logger.debug(String.format("Could not find template [id=%s, name=%s] on the storage pool [id=%s]; copying the template to the target storage pool.", - srcVolumeInfo.getTemplateId(), sourceTemplateInfo.getName(), destDataStore.getId())); + logger.debug("Could not find template [id={}, uuid={}, name={}] on the storage pool [{}]; copying the template to the target storage pool.", + srcVolumeInfo.getTemplateId(), sourceTemplateInfo.getUuid(), sourceTemplateInfo.getName(), destDataStore); TemplateInfo destTemplateInfo = templateDataFactory.getTemplate(srcVolumeInfo.getTemplateId(), destDataStore); final TemplateObjectTO destTemplate = new TemplateObjectTO(destTemplateInfo); @@ -234,7 +234,8 @@ protected void 
copyTemplateToTargetFilesystemStorageIfNeeded(VolumeInfo srcVolum return; } } - logger.debug(String.format("Skipping 'copy template to target filesystem storage before migration' due to the template [%s] already exist on the storage pool [%s].", srcVolumeInfo.getTemplateId(), destStoragePool.getId())); + logger.debug("Skipping 'copy template to target filesystem storage before migration' due to the template [{}] already exist on the storage pool [{}].", + srcVolumeInfo.getTemplateId(), destStoragePool); } /** @@ -267,8 +268,7 @@ protected Answer sendCopyCommand(Host destHost, TemplateObjectTO sourceTemplate, } private String generateFailToCopyTemplateMessage(TemplateObjectTO sourceTemplate, DataStore destDataStore) { - return String.format("Failed to copy template [id=%s, name=%s] to the primary storage pool [id=%s].", sourceTemplate.getId(), - sourceTemplate.getName(), destDataStore.getId()); + return String.format("Failed to copy template [%s] to the primary storage pool [%s].", sourceTemplate, destDataStore); } /** diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java index 5109118fb54e..c6430bcf9f93 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java @@ -123,9 +123,9 @@ public TemplateInfo getTemplate(long templateId, DataStore store) { if (logger.isDebugEnabled()) { if (!found) { - logger.debug("template " + templateId + " is not in store:" + store.getId() + ", type:" + store.getRole()); + logger.debug("template {} with id {} is not in store: {}, type: {}", templ, templateId, store, store.getRole()); } else { - logger.debug("template " + templateId + " is already in store:" + store.getId() + ", type:" + store.getRole()); + 
logger.debug("template {} with id {} is already in store:{}, type: {}", templ, templateId, store, store.getRole()); } } @@ -242,7 +242,7 @@ protected Long getBypassedTemplateExistingOrNewPoolId(VMTemplateVO templateVO, L HostVO host = hostDao.findById(hostId); List pools = getStoragePoolsForScope(host.getDataCenterId(), host.getClusterId(), hostId, host.getHypervisorType()); if (CollectionUtils.isEmpty(pools)) { - throw new CloudRuntimeException(String.format("No storage pool found to download template: %s", templateVO.getName())); + throw new CloudRuntimeException(String.format("No storage pool found to download template: %s", templateVO)); } List existingRefs = templatePoolDao.listByTemplateId(templateVO.getId()); return getOneMatchingPoolIdFromRefs(existingRefs, pools); @@ -274,7 +274,7 @@ public TemplateInfo getReadyBypassedTemplateOnManagedStorage(long templateId, Te } if (poolId == null) { - throw new CloudRuntimeException("No storage pool specified to download template: " + templateId); + throw new CloudRuntimeException(String.format("No storage pool specified to download template: %s", templateVO)); } StoragePoolVO poolVO = primaryDataStoreDao.findById(poolId); @@ -284,7 +284,7 @@ public TemplateInfo getReadyBypassedTemplateOnManagedStorage(long templateId, Te VMTemplateStoragePoolVO spoolRef = templatePoolDao.findByPoolTemplate(poolId, templateId, null); if (spoolRef == null) { - throw new CloudRuntimeException("Template not created on managed storage pool: " + poolId + " to copy the download template: " + templateId); + throw new CloudRuntimeException(String.format("Template not created on managed storage pool: %s to copy the download template: %s", poolVO, templateVO)); } else if (spoolRef.getDownloadState() == VMTemplateStorageResourceAssoc.Status.NOT_DOWNLOADED) { directDownloadManager.downloadTemplate(templateId, poolId, hostId); } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java 
b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java index abc955c2e496..1ea2c34de77f 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java @@ -280,7 +280,7 @@ public void handleSysTemplateDownload(HypervisorType hostHyper, Long dcId) { TemplateDataStoreVO tmpltHost = _vmTemplateStoreDao.findByStoreTemplate(store.getId(), template.getId()); if (tmpltHost == null) { associateTemplateToZone(template.getId(), dcId); - logger.info("Downloading builtin template " + template.getUniqueName() + " to data center: " + dcId); + logger.info("Downloading builtin template {} to data center: {}", template, dcId); TemplateInfo tmplt = _templateFactory.getTemplate(template.getId(), DataStoreRole.Image); createTemplateAsync(tmplt, store, null); } @@ -299,7 +299,7 @@ protected boolean isSkipTemplateStoreDownload(VMTemplateVO template, Long zoneId return false; } if (zoneId != null && _vmTemplateStoreDao.findByTemplateZone(template.getId(), zoneId, DataStoreRole.Image) == null) { - logger.debug(String.format("Template %s is not present on any image store for the zone ID: %d, its download cannot be skipped", template.getUniqueName(), zoneId)); + logger.debug("Template {} is not present on any image store for the zone ID: {}, its download cannot be skipped", template, zoneId); return false; } return true; @@ -376,29 +376,29 @@ public void handleTemplateSync(DataStore store) { TemplateProp tmpltInfo = templateInfos.remove(uniqueName); toBeDownloaded.remove(tmplt); if (tmpltStore != null) { - logger.info("Template Sync found " + uniqueName + " already in the image store"); + logger.info("Template Sync found {} already in the image store", tmplt); if (tmpltStore.getDownloadState() != Status.DOWNLOADED) { tmpltStore.setErrorString(""); } if (tmpltInfo.isCorrupted()) { 
tmpltStore.setDownloadState(Status.DOWNLOAD_ERROR); - String msg = "Template " + tmplt.getName() + ":" + tmplt.getId() + " is corrupted on secondary storage " + tmpltStore.getId(); + String msg = String.format("Template %s is corrupted on secondary storage %s", tmplt, store); tmpltStore.setErrorString(msg); logger.info(msg); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_UPLOAD_FAILED, zoneId, null, msg, msg); if (tmplt.getState() == VirtualMachineTemplate.State.NotUploaded || tmplt.getState() == VirtualMachineTemplate.State.UploadInProgress) { - logger.info("Template Sync found " + uniqueName + " on image store " + storeId + " uploaded using SSVM as corrupted, marking it as failed"); + logger.info("Template Sync found {} on image store {} uploaded using SSVM as corrupted, marking it as failed", tmplt, store); tmpltStore.setState(State.Failed); try { stateMachine.transitTo(tmplt, VirtualMachineTemplate.Event.OperationFailed, null, _templateDao); } catch (NoTransitionException e) { - logger.error("Unexpected state transition exception for template " + tmplt.getName() + ". Details: " + e.getMessage()); + logger.error("Unexpected state transition exception for template {}. Details: {}", tmplt, e.getMessage()); } } else if (tmplt.getUrl() == null) { msg = "Private template (" + tmplt + ") with install path " + tmpltInfo.getInstallPath() + " is corrupted, please check in image store: " + tmpltStore.getDataStoreId(); logger.warn(msg); } else { - logger.info("Removing template_store_ref entry for corrupted template " + tmplt.getName()); + logger.info("Removing template_store_ref entry for corrupted template {}", tmplt); _vmTemplateStoreDao.remove(tmpltStore.getId()); toBeDownloaded.add(tmplt); } @@ -438,7 +438,7 @@ public void handleTemplateSync(DataStore store) { try { stateMachine.transitTo(tmplt, event, null, _templateDao); } catch (NoTransitionException e) { - logger.error("Unexpected state transition exception for template " + tmplt.getName() + ". 
Details: " + e.getMessage()); + logger.error("Unexpected state transition exception for template {}. Details: {}", tmplt, e.getMessage()); } } @@ -483,30 +483,30 @@ public void handleTemplateSync(DataStore store) { tmpltInfo.getPhysicalSize(), tmpltInfo.getSize(), VirtualMachineTemplate.class.getName(), tmplt.getUuid()); } } else if (tmplt.getState() == VirtualMachineTemplate.State.NotUploaded || tmplt.getState() == VirtualMachineTemplate.State.UploadInProgress) { - logger.info("Template Sync did not find " + uniqueName + " on image store " + storeId + " uploaded using SSVM, marking it as failed"); + logger.info("Template Sync did not find {} on image store {} uploaded using SSVM, marking it as failed", tmplt, store); toBeDownloaded.remove(tmplt); tmpltStore.setDownloadState(Status.DOWNLOAD_ERROR); - String msg = "Template " + tmplt.getName() + ":" + tmplt.getId() + " is corrupted on secondary storage " + tmpltStore.getId(); + String msg = String.format("Template %s is corrupted on secondary storage %s", tmplt, store); tmpltStore.setErrorString(msg); tmpltStore.setState(State.Failed); _vmTemplateStoreDao.update(tmpltStore.getId(), tmpltStore); try { stateMachine.transitTo(tmplt, VirtualMachineTemplate.Event.OperationFailed, null, _templateDao); } catch (NoTransitionException e) { - logger.error("Unexpected state transition exception for template " + tmplt.getName() + ". Details: " + e.getMessage()); + logger.error("Unexpected state transition exception for template {}. 
Details: {}", tmplt, e.getMessage()); } } else if (tmplt.isDirectDownload()) { - logger.info("Template " + tmplt.getName() + ":" + tmplt.getId() + " is marked for direct download, discarding it for download on image stores"); + logger.info("Template {} is marked for direct download, discarding it for download on image stores", tmplt); toBeDownloaded.remove(tmplt); } else { - logger.info("Template Sync did not find " + uniqueName + " on image store " + storeId + ", may request download based on available hypervisor types"); + logger.info("Template Sync did not find {} on image store {}, may request download based on available hypervisor types", tmplt, store); if (tmpltStore != null) { if (_storeMgr.isRegionStore(store) && tmpltStore.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOADED && tmpltStore.getState() == State.Ready && tmpltStore.getInstallPath() == null) { logger.info("Keep fake entry in template store table for migration of previous NFS to object store"); } else { - logger.info("Removing leftover template " + uniqueName + " entry from template store table"); + logger.info("Removing leftover template {} entry from template store table", tmplt); // remove those leftover entries _vmTemplateStoreDao.remove(tmpltStore.getId()); } @@ -530,12 +530,12 @@ public void handleTemplateSync(DataStore store) { // download. 
for (VMTemplateVO tmplt : toBeDownloaded) { if (tmplt.getUrl() == null) { // If url is null, skip downloading - logger.info("Skip downloading template " + tmplt.getUniqueName() + " since no url is specified."); + logger.info("Skip downloading template {} since no url is specified.", tmplt); continue; } // if this is private template, skip sync to a new image store if (isSkipTemplateStoreDownload(tmplt, zoneId)) { - logger.info("Skip sync downloading private template " + tmplt.getUniqueName() + " to a new image store"); + logger.info("Skip sync downloading private template {} to a new image store", tmplt); continue; } @@ -551,7 +551,7 @@ public void handleTemplateSync(DataStore store) { } if (availHypers.contains(tmplt.getHypervisorType())) { - logger.info("Downloading template " + tmplt.getUniqueName() + " to image store " + store.getName()); + logger.info("Downloading template {} to image store {}", tmplt, store); associateTemplateToZone(tmplt.getId(), zoneId); TemplateInfo tmpl = _templateFactory.getTemplate(tmplt.getId(), store); TemplateOpContext context = new TemplateOpContext<>(null,(TemplateObject)tmpl, null); @@ -560,8 +560,7 @@ public void handleTemplateSync(DataStore store) { caller.setContext(context); createTemplateAsync(tmpl, store, caller); } else { - logger.info("Skip downloading template " + tmplt.getUniqueName() + " since current data center does not have hypervisor " + - tmplt.getHypervisorType().toString()); + logger.info("Skip downloading template {} since current data center does not have hypervisor {}", tmplt, tmplt.getHypervisorType()); } } } @@ -585,10 +584,10 @@ public void handleTemplateSync(DataStore store) { answer = ep.sendMessage(dtCommand); } if (answer == null || !answer.getResult()) { - logger.info("Failed to deleted template at store: " + store.getName()); + logger.info("Failed to deleted template at store: {}", store); } else { - String description = "Deleted template " + tInfo.getTemplateName() + " on secondary storage " + 
storeId; + String description = String.format("Deleted template %s on secondary storage %s", tInfo.getTemplateName(), store); logger.info(description); } @@ -598,7 +597,7 @@ public void handleTemplateSync(DataStore store) { syncLock.unlock(); } } else { - logger.info("Couldn't get global lock on " + lockString + ", another thread may be doing template sync on data store " + storeId + " now."); + logger.info("Couldn't get global lock on {}, another thread may be doing template sync on data store {} now.", lockString, store); } } finally { syncLock.releaseRef(); @@ -673,15 +672,14 @@ protected Void createTemplateAsyncCallBack(AsyncCallbackDispatcher listTemplate(DataStore ssStore) { return tanswer.getTemplateInfo(); } else { if (logger.isDebugEnabled()) { - logger.debug("can not list template for secondary storage host " + ssStore.getId()); + logger.debug("can not list template for secondary storage host {}", ssStore); } } @@ -844,8 +842,7 @@ private boolean createChildDataDiskTemplate(DatadiskTO dataDiskTemplate, VMTempl _resourceLimitMgr.incrementResourceCount(template.getAccountId(), ResourceType.secondary_storage, templateVO.getSize()); } else { // Delete the Datadisk templates that were already created as they are now invalid - logger.debug("Since creation of Datadisk template: " + templateVO.getId() + " failed, delete other Datadisk templates that were created as part of parent" - + " template download"); + logger.debug("Since creation of Datadisk template: {} failed, delete other Datadisk templates that were created as part of parent template download", templateVO); TemplateInfo parentTemplateInfo = imageFactory.getTemplate(templateVO.getParentTemplateId(), imageStore); cleanupDatadiskTemplates(parentTemplateInfo); } @@ -859,8 +856,7 @@ private boolean finalizeParentTemplate(DatadiskTO dataDiskTemplate, VMTemplateVO TemplateApiResult result = null; result = templateFuture.get(); if (!result.isSuccess()) { - logger.debug("Since creation of parent template: " + 
templateInfo.getId() + " failed, delete Datadisk templates that were created as part of parent" - + " template download"); + logger.debug("Since creation of parent template: {} failed, delete Datadisk templates that were created as part of parent template download", templateInfo); cleanupDatadiskTemplates(templateInfo); } return result.isSuccess(); @@ -909,12 +905,12 @@ private void cleanupDatadiskTemplates(TemplateInfo parentTemplateInfo) { DataStore imageStore = parentTemplateInfo.getDataStore(); List datadiskTemplatesToDelete = _templateDao.listByParentTemplatetId(parentTemplateInfo.getId()); for (VMTemplateVO datadiskTemplateToDelete: datadiskTemplatesToDelete) { - logger.info("Delete template: " + datadiskTemplateToDelete.getId() + " from image store: " + imageStore.getName()); + logger.info("Delete template: {} from image store: {}", datadiskTemplateToDelete, imageStore); AsyncCallFuture future = deleteTemplateAsync(imageFactory.getTemplate(datadiskTemplateToDelete.getId(), imageStore)); try { TemplateApiResult result = future.get(); if (!result.isSuccess()) { - logger.warn("Failed to delete datadisk template: " + datadiskTemplateToDelete + " from image store: " + imageStore.getName() + " due to: " + result.getResult()); + logger.warn("Failed to delete datadisk template: {} from image store: {} due to: {}", datadiskTemplateToDelete, imageStore, result.getResult()); break; } _vmTemplateZoneDao.deletePrimaryRecordsForTemplate(datadiskTemplateToDelete.getId()); @@ -1036,24 +1032,23 @@ public void syncTemplateToRegionStore(long templateId, DataStore store) { // wide store if it is not there physically. 
TemplateInfo tmplOnStore = _templateFactory.getTemplate(templateId, store); if (tmplOnStore == null) { - throw new CloudRuntimeException("Cannot find an entry in template_store_ref for template " + templateId + " on region store: " + store.getName()); + throw new CloudRuntimeException(String.format("Cannot find an entry in template_store_ref for template %d on region store: %s", templateId, store)); } if (tmplOnStore.getInstallPath() == null || tmplOnStore.getInstallPath().length() == 0) { // template is not on region store yet, sync to region store TemplateInfo srcTemplate = _templateFactory.getReadyTemplateOnCache(templateId); if (srcTemplate == null) { - throw new CloudRuntimeException("Cannot find template " + templateId + " on cache store"); + throw new CloudRuntimeException(String.format("Cannot find template %s on cache store", tmplOnStore)); } AsyncCallFuture future = syncToRegionStoreAsync(srcTemplate, store); try { TemplateApiResult result = future.get(); if (result.isFailed()) { - throw new CloudRuntimeException("sync template from cache to region wide store failed for image store " + store.getName() + ":" + - result.getResult()); + throw new CloudRuntimeException(String.format("sync template from cache to region wide store failed for image store %s: %s", store, result.getResult())); } _cacheMgr.releaseCacheObject(srcTemplate); // reduce reference count for template on cache, so it can recycled by schedule } catch (Exception ex) { - throw new CloudRuntimeException("sync template from cache to region wide store failed for image store " + store.getName()); + throw new CloudRuntimeException(String.format("sync template from cache to region wide store failed for image store %s", store)); } } } @@ -1071,8 +1066,7 @@ public AsyncCallFuture copyTemplate(TemplateInfo srcTemplate, // generate a URL from source template ssvm to download to destination data store String url = generateCopyUrl(srcTemplate); if (url == null) { - logger.warn("Unable to start/resume 
copy of template " + srcTemplate.getUniqueName() + " to " + destStore.getName() + - ", no secondary storage vm in running state in source zone"); + logger.warn("Unable to start/resume copy of template {} to {}, no secondary storage vm in running state in source zone", srcTemplate, destStore); throw new CloudRuntimeException("No secondary VM in running state in source template zone "); } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java index c12cafad99d0..bf7b0d6c634b 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java @@ -102,6 +102,7 @@ public void setSize(Long size) { imageVO.setSize(size); } + @Override public VMTemplateVO getImage() { if (imageVO == null) { String msg = String.format("Template Object is not properly initialised %s", this.toString()); diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java index afc8be1e5f97..7b1088d589fc 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java @@ -136,7 +136,7 @@ public SnapshotInfo backupSnapshot(SnapshotInfo snapshot) { try { snapObj.processEvent(Snapshot.Event.OperationNotPerformed); } catch (NoTransitionException e) { - logger.debug("Failed to change state: " + snapshot.getId() + ": " + e.toString()); + logger.debug("Failed to change state: {}: {}", snapshot, e.toString()); throw new CloudRuntimeException(e.toString()); } return 
snapshotDataFactory.getSnapshot(snapObj.getId(), store); @@ -231,7 +231,7 @@ protected boolean deleteSnapshotChain(SnapshotInfo snapshot, String storageToStr if (r) { List cacheSnaps = snapshotDataFactory.listSnapshotOnCache(snapshot.getId()); for (SnapshotInfo cacheSnap : cacheSnaps) { - logger.debug(String.format("Deleting snapshot %s from image cache [%s].", snapshotTo, cacheSnap.getDataStore().getName())); + logger.debug("Deleting snapshot {} from image cache [{}].", snapshotTo, cacheSnap.getDataStore()); cacheSnap.delete(); } } @@ -297,7 +297,7 @@ public boolean deleteSnapshot(Long snapshotId, Long zoneId) { if (!Snapshot.State.BackedUp.equals(snapshotVO.getState()) && !Snapshot.State.Destroying.equals(snapshotVO.getState())) { - throw new InvalidParameterValueException("Can't delete snapshotshot " + snapshotId + " due to it is in " + snapshotVO.getState() + " Status"); + throw new InvalidParameterValueException(String.format("Can't delete snapshot %s due to it is in %s Status", snapshotVO, snapshotVO.getState())); } return destroySnapshotEntriesAndFiles(snapshotVO, zoneId); @@ -442,7 +442,7 @@ public boolean revertSnapshot(SnapshotInfo snapshot) { SnapshotVO snapshotVO = snapshotDao.acquireInLockTable(snapshot.getId()); if (snapshotVO == null) { - throw new CloudRuntimeException("Failed to get lock on snapshot:" + snapshot.getId()); + throw new CloudRuntimeException(String.format("Failed to get lock on snapshot: %s", snapshot)); } try { @@ -463,9 +463,9 @@ public boolean revertSnapshot(SnapshotInfo snapshot) { result = snapshotSvr.revertSnapshot(snapshot); if (!result) { - logger.debug("Failed to revert snapshot: " + snapshot.getId()); + logger.debug("Failed to revert snapshot: {}", snapshot); - throw new CloudRuntimeException("Failed to revert snapshot: " + snapshot.getId()); + throw new CloudRuntimeException(String.format("Failed to revert snapshot: %s", snapshot)); } } finally { if (result) { @@ -498,7 +498,7 @@ public SnapshotInfo 
takeSnapshot(SnapshotInfo snapshot) { SnapshotVO snapshotVO = snapshotDao.acquireInLockTable(snapshot.getId()); if (snapshotVO == null) { - throw new CloudRuntimeException("Failed to get lock on snapshot:" + snapshot.getId()); + throw new CloudRuntimeException(String.format("Failed to get lock on snapshot: %s", snapshot)); } try { diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java index fc5e61ef710f..4d8919ccc489 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java @@ -57,7 +57,7 @@ public SnapshotInfo getSnapshot(long snapshotId, DataStore store) { public SnapshotInfo getSnapshot(DataObject obj, DataStore store) { SnapshotVO snapshot = snapshotDao.findById(obj.getId()); if (snapshot == null) { - throw new CloudRuntimeException("Can't find snapshot: " + obj.getId()); + throw new CloudRuntimeException("Can't find snapshot: " + obj); } SnapshotObject so = SnapshotObject.getSnapshotObject(snapshot, store); return so; diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java index e71a01fb4c5c..8a2e1565fe22 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java @@ -184,8 +184,7 @@ public void markBackedUp() throws CloudRuntimeException{ processEvent(Event.OperationNotPerformed); } catch (NoTransitionException ex) { logger.error("no transition error: ", ex); - throw new CloudRuntimeException("Error marking snapshot 
backed up: " + - this.snapshot.getId() + " " + ex.getMessage()); + throw new CloudRuntimeException(String.format("Error marking snapshot backed up: %s %s", this.snapshot, ex.getMessage())); } } @@ -370,12 +369,11 @@ public void processEvent(ObjectInDataStoreStateMachine.Event event, Answer answe if (snapshotTO.getVolume() != null && snapshotTO.getVolume().getPath() != null) { VolumeVO vol = volumeDao.findByUuid(snapshotTO.getVolume().getUuid()); if (vol != null) { - logger.info("Update volume path change due to snapshot operation, volume " + vol.getId() + " path: " + vol.getPath() + "->" + - snapshotTO.getVolume().getPath()); + logger.info("Update volume path change due to snapshot operation, volume {} path: {}->{}", vol, vol.getPath(), snapshotTO.getVolume().getPath()); vol.setPath(snapshotTO.getVolume().getPath()); volumeDao.update(vol.getId(), vol); } else { - logger.error("Cound't find the original volume with uuid: " + snapshotTO.getVolume().getUuid()); + logger.error("Couldn't find the original volume: {}", snapshotTO.getVolume()); } } } else { diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java index dafc40e0674d..37b7d6562383 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java @@ -267,14 +267,14 @@ public SnapshotResult takeSnapshot(SnapshotInfo snap) { PrimaryDataStoreDriver primaryStore = (PrimaryDataStoreDriver)snapshotOnPrimary.getDataStore().getDriver(); primaryStore.takeSnapshot(snapshot, caller); } catch (Exception e) { - logger.debug("Failed to take snapshot: " + snapshot.getId(), e); + logger.debug("Failed to take snapshot: {}", snapshot, e); try { snapshot.processEvent(Snapshot.Event.OperationFailed); 
snapshot.processEvent(Event.OperationFailed); } catch (NoTransitionException e1) { logger.debug("Failed to change state for event: OperationFailed", e); } - throw new CloudRuntimeException("Failed to take snapshot" + snapshot.getId()); + throw new CloudRuntimeException(String.format("Failed to take snapshot %s", snapshot)); } SnapshotResult result; @@ -407,7 +407,7 @@ protected Void copySnapshotAsyncCallback(AsyncCallbackDispatcher snapshots = _snapshotDao.listByStatus(volumeId, Snapshot.State.BackedUp); if (snapshots != null) { for (SnapshotVO snapshot : snapshots) { - syncSnapshotToRegionStore(snapshot.getId(), store); + syncSnapshotToRegionStore(snapshot, store); } } } @@ -603,53 +603,49 @@ public void syncVolumeSnapshotsToRegionStore(long volumeId, DataStore store) { @Override public void cleanupVolumeDuringSnapshotFailure(Long volumeId, Long snapshotId) { - SnapshotVO snaphsot = _snapshotDao.findById(snapshotId); + SnapshotVO snapshot = _snapshotDao.findById(snapshotId); - if (snaphsot != null) { - if (snaphsot.getState() != Snapshot.State.BackedUp) { + if (snapshot != null) { + if (snapshot.getState() != Snapshot.State.BackedUp) { List snapshotDataStoreVOs = _snapshotStoreDao.findBySnapshotId(snapshotId); for (SnapshotDataStoreVO snapshotDataStoreVO : snapshotDataStoreVOs) { - logger.debug("Remove snapshot " + snapshotId + ", status " + snapshotDataStoreVO.getState() + - " on snapshot_store_ref table with id: " + snapshotDataStoreVO.getId()); + logger.debug("Remove snapshot {}, status {} on snapshot_store_ref table with id: {}", snapshot, snapshotDataStoreVO.getState(), snapshotDataStoreVO.getId()); _snapshotStoreDao.remove(snapshotDataStoreVO.getId()); } - logger.debug("Remove snapshot " + snapshotId + " status " + snaphsot.getState() + " from snapshot table"); + logger.debug("Remove snapshot {} status {} from snapshot table", snapshot, snapshot.getState()); _snapshotDao.remove(snapshotId); } } - - } // push one individual snapshots currently on cache store 
to region store if it is not there already - private void syncSnapshotToRegionStore(long snapshotId, DataStore store){ + private void syncSnapshotToRegionStore(SnapshotVO snapshot, DataStore store){ // if snapshot is already on region wide object store, check if it is really downloaded there (by checking install_path). Sync snapshot to region // wide store if it is not there physically. - SnapshotInfo snapOnStore = _snapshotFactory.getSnapshot(snapshotId, store); + SnapshotInfo snapOnStore = _snapshotFactory.getSnapshot(snapshot.getId(), store); if (snapOnStore == null) { - throw new CloudRuntimeException("Cannot find an entry in snapshot_store_ref for snapshot " + snapshotId + " on region store: " + store.getName()); + throw new CloudRuntimeException(String.format("Cannot find an entry in snapshot_store_ref for snapshot %s on region store: %s", snapshot, store)); } if (snapOnStore.getPath() == null || snapOnStore.getPath().length() == 0) { if (logger.isDebugEnabled()) { - logger.debug("sync snapshot " + snapshotId + " from cache to object store..."); + logger.debug("sync snapshot {} from cache to object store...", snapshot); } // snapshot is not on region store yet, sync to region store - SnapshotInfo srcSnapshot = _snapshotFactory.getReadySnapshotOnCache(snapshotId); + SnapshotInfo srcSnapshot = _snapshotFactory.getReadySnapshotOnCache(snapshot.getId()); if (srcSnapshot == null) { - throw new CloudRuntimeException("Cannot find snapshot " + snapshotId + " on cache store"); + throw new CloudRuntimeException(String.format("Cannot find snapshot %s on cache store", snapshot)); } AsyncCallFuture future = syncToRegionStoreAsync(srcSnapshot, store); try { SnapshotResult result = future.get(); if (result.isFailed()) { - throw new CloudRuntimeException("sync snapshot from cache to region wide store failed for image store " + store.getName() + ":" - + result.getResult()); + throw new CloudRuntimeException(String.format("sync snapshot from cache to region wide store failed 
for image store %s: %s", store, result.getResult())); } _cacheMgr.releaseCacheObject(srcSnapshot); // reduce reference count for template on cache, so it can recycled by schedule } catch (Exception ex) { - throw new CloudRuntimeException("sync snapshot from cache to region wide store failed for image store " + store.getName()); + throw new CloudRuntimeException(String.format("sync snapshot from cache to region wide store failed for image store %s", store)); } } @@ -723,7 +719,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { _snapshotDao.remove(srcSnapshot.getId()); } catch (NoTransitionException ex) { logger.debug("Failed to create backup " + ex.toString()); - throw new CloudRuntimeException("Failed to backup snapshot" + snapshot.getId()); + throw new CloudRuntimeException(String.format("Failed to backup snapshot%s", snapshot)); } } }); @@ -769,7 +765,7 @@ public AsyncCallFuture queryCopySnapshot(SnapshotInfo snapshot) AsyncCallFuture future = new AsyncCallFuture<>(); EndPoint ep = epSelector.select(snapshot); if (ep == null) { - logger.error(String.format("Failed to find endpoint for generating copy URL for snapshot %d with store %d", snapshot.getId(), snapshot.getDataStore().getId())); + logger.error(String.format("Failed to find endpoint for generating copy URL for snapshot %s with store %s", snapshot.getSnapshotVO(), snapshot.getDataStore())); throw new ResourceUnavailableException("No secondary VM in running state in source snapshot zone", DataCenter.class, snapshot.getDataCenterId()); } DataStore store = snapshot.getDataStore(); diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java index a621e8a076d1..79be65888995 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java +++ 
b/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java @@ -299,7 +299,7 @@ public EndPoint select(DataObject srcData, DataObject destData, StorageAction ac @Override public EndPoint select(DataObject srcData, DataObject destData, StorageAction action, boolean encryptionRequired) { - logger.error("IR24 select BACKUPSNAPSHOT from primary to secondary " + srcData.getId() + " dest=" + destData.getId()); + logger.error("IR24 select BACKUPSNAPSHOT from primary to secondary {} dest={}", srcData, destData); if (action == StorageAction.BACKUPSNAPSHOT && srcData.getDataStore().getRole() == DataStoreRole.Primary) { SnapshotInfo srcSnapshot = (SnapshotInfo)srcData; VolumeInfo volumeInfo = srcSnapshot.getBaseVolume(); diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java index e582ae6b4c64..5b712b32a6ad 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java @@ -159,11 +159,13 @@ public StoragePoolVO getStoragePoolForVM(Long vmId) { VolumeVO rootVolume = rootVolumes.get(0); StoragePoolVO rootVolumePool = primaryDataStoreDao.findById(rootVolume.getPoolId()); if (rootVolumePool == null) { - throw new InvalidParameterValueException("Failed to find root volume storage pool for the user vm:" + vmId); + throw new InvalidParameterValueException(String.format( + "Failed to find storage pool for root volume %s for the user vm: %d", rootVolume, vmId)); } if (rootVolumePool.isInMaintenance()) { - throw new InvalidParameterValueException("Storage pool for the user vm:" + vmId + " is in maintenance"); + throw new InvalidParameterValueException(String.format( + "Storage pool %s for root volume %s of the user vm: %d is in maintenance", rootVolumePool, rootVolume, 
vmId)); } return rootVolumePool; diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java index db3f798a68a7..a2e9eff2a08a 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java @@ -177,19 +177,19 @@ public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCal if (data.getType() == DataObjectType.TEMPLATE) { caller.setCallback(caller.getTarget().createTemplateAsyncCallback(null, null)); if (logger.isDebugEnabled()) { - logger.debug("Downloading template to data store " + dataStore.getId()); + logger.debug("Downloading template to data store {}", dataStore); } _downloadMonitor.downloadTemplateToStorage(data, caller); } else if (data.getType() == DataObjectType.VOLUME) { caller.setCallback(caller.getTarget().createVolumeAsyncCallback(null, null)); if (logger.isDebugEnabled()) { - logger.debug("Downloading volume to data store " + dataStore.getId()); + logger.debug("Downloading volume to data store {}", dataStore); } _downloadMonitor.downloadVolumeToStorage(data, caller); } else if (data.getType() == DataObjectType.SNAPSHOT) { caller.setCallback(caller.getTarget().createSnapshotAsyncCallback(null, null)); if (logger.isDebugEnabled()) { - logger.debug("Downloading volume to data store " + dataStore.getId()); + logger.debug("Downloading snapshot to data store {}", dataStore); } _downloadMonitor.downloadSnapshotToStorage(data, caller); } @@ -212,7 +212,7 @@ protected Void createTemplateAsyncCallback(AsyncCallbackDispatcher eps, CopyCommand cmd) { answer = agentMgr.send(endPoint.getId(), cmd); answer.setContextParam("cmd", cmdExecId.toString()); return answer; - } catch (AgentUnavailableException e) { + } catch (AgentUnavailableException | 
OperationTimedoutException e) { errMsg = e.toString(); - logger.debug("Failed to send command, due to Agent:" + endPoint.getId() + ", " + e.toString()); - } catch (OperationTimedoutException e) { - errMsg = e.toString(); - logger.debug("Failed to send command, due to Agent:" + endPoint.getId() + ", " + e.toString()); + logger.debug("Failed to send command, due to Agent [id: {}, uuid: {}]: {}", endPoint.getId(), endPoint.getUuid(), e.toString()); } - throw new CloudRuntimeException("Failed to send command, due to Agent:" + endPoint.getId() + ", " + errMsg); + throw new CloudRuntimeException(String.format("Failed to send command, due to Agent: [id: %s, uuid: %s], %s", endPoint.getId(), endPoint.getUuid(), errMsg)); } @Override @@ -507,7 +504,7 @@ public Void createDataDiskTemplateAsync(TemplateInfo dataDiskTemplate, String pa Answer answer = null; String errMsg = null; if (logger.isDebugEnabled()) { - logger.debug("Create Datadisk template: " + dataDiskTemplate.getId()); + logger.debug("Create Datadisk template: {}", dataDiskTemplate); } CreateDatadiskTemplateCommand cmd = new CreateDatadiskTemplateCommand(dataDiskTemplate.getTO(), path, diskId, fileSize, bootable); EndPoint ep = _defaultEpSelector.select(dataDiskTemplate.getDataStore()); diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java index e4c269326198..7f28224a3168 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java @@ -26,6 +26,7 @@ import javax.inject.Inject; +import com.cloud.dc.dao.ClusterDao; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.logging.log4j.Logger; @@ -74,6 +75,8 @@ 
public class PrimaryDataStoreHelper { @Inject protected StoragePoolHostDao storagePoolHostDao; @Inject + protected ClusterDao clusterDao; + @Inject private AnnotationDao annotationDao; public DataStore createPrimaryDataStore(PrimaryDataStoreParameters params) { @@ -266,7 +269,7 @@ public boolean deletePrimaryDataStore(DataStore store) { this._capacityDao.removeBy(Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, null, null, null, poolVO.getId()); txn.commit(); - logger.debug("Storage pool id=" + poolVO.getId() + " is removed successfully"); + logger.debug("Storage pool {} is removed successfully", poolVO); return true; } @@ -286,7 +289,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { _capacityDao.update(capacity.getId(), capacity); } }); - logger.debug("Scope of storage pool id=" + pool.getId() + " is changed to zone"); + logger.debug("Scope of storage pool {} is changed to zone", pool); } public void switchToCluster(DataStore store, ClusterScope clusterScope) { @@ -312,6 +315,6 @@ public void doInTransactionWithoutResult(TransactionStatus status) { _capacityDao.update(capacity.getId(), capacity); } }); - logger.debug("Scope of storage pool id=" + pool.getId() + " is changed to cluster id=" + clusterScope.getScopeId()); + logger.debug("Scope of storage pool {} is changed to cluster {}", pool::toString, () -> clusterDao.findById(clusterScope.getScopeId())); } } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 3ca1d9201db8..0bd4b5d8bc07 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -32,6 +32,7 @@ import javax.inject.Inject; +import com.cloud.vm.dao.VMInstanceDao; import org.apache.cloudstack.annotation.AnnotationService; 
import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.api.command.user.volume.CheckAndRepairVolumeCmd; @@ -163,6 +164,8 @@ public class VolumeServiceImpl implements VolumeService { @Inject VolumeDao volDao; @Inject + VMInstanceDao vmDao; + @Inject PrimaryDataStoreProviderManager dataStoreMgr; @Inject DataMotionService motionSrv; @@ -378,7 +381,7 @@ public AsyncCallFuture expungeVolumeAsync(VolumeInfo volume) { if (volume.getDataStore() == null) { logger.info("Expunge volume with no data store specified"); if (canVolumeBeRemoved(volume.getId())) { - logger.info("Volume " + volume.getId() + " is not referred anywhere, remove it from volumes table"); + logger.info("Volume {} is not referred anywhere, remove it from volumes table", volume); volDao.remove(volume.getId()); } future.complete(result); @@ -389,7 +392,7 @@ public AsyncCallFuture expungeVolumeAsync(VolumeInfo volume) { VolumeDataStoreVO volumeStore = _volumeStoreDao.findByVolume(volume.getId()); if (volumeStore != null) { if (volumeStore.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOAD_IN_PROGRESS) { - String msg = "Volume: " + volume.getName() + " is currently being uploaded; can't delete it."; + String msg = String.format("Volume: %s is currently being uploaded; can't delete it.", volume); logger.debug(msg); result.setSuccess(false); result.setResult(msg); @@ -400,7 +403,7 @@ public AsyncCallFuture expungeVolumeAsync(VolumeInfo volume) { VolumeVO vol = volDao.findById(volume.getId()); if (vol == null) { - logger.debug("Volume " + volume.getId() + " is not found"); + logger.debug("Volume {} is not found", volume); future.complete(result); return future; } @@ -484,7 +487,7 @@ public Void deleteVolumeCallback(AsyncCallbackDispatcher> targets) { + private void removeDynamicTargets(Host host, List> targets) { ModifyTargetsCommand cmd = new ModifyTargetsCommand(); cmd.setTargets(targets); @@ -1088,18 +1091,18 @@ private void removeDynamicTargets(long hostId, 
List> targets cmd.setAdd(false); cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC); - sendModifyTargetsCommand(cmd, hostId); + sendModifyTargetsCommand(cmd, host); } - private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { - Answer answer = agentMgr.easySend(hostId, cmd); + private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, Host host) { + Answer answer = agentMgr.easySend(host.getId(), cmd); if (answer == null) { String msg = "Unable to get an answer to the modify targets command"; logger.warn(msg); } else if (!answer.getResult()) { - String msg = "Unable to modify target on the following host: " + hostId; + String msg = String.format("Unable to modify target on the following host: %s", host); logger.warn(msg); } @@ -1117,12 +1120,12 @@ private void createManagedVolumeCloneTemplateAsync(VolumeInfo volumeInfo, Templa VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId(), volumeInfo.getDeployAsIsConfiguration()); if (templatePoolRef == null) { - throw new CloudRuntimeException("Failed to find template " + templateOnPrimary.getUniqueName() + " in storage pool " + destPrimaryDataStore.getId()); + throw new CloudRuntimeException(String.format("Failed to find template %s in storage pool %s", templateOnPrimary.getImage(), destPrimaryDataStore)); } //XXX: not sure if this the right thing to do here. 
We can always fallback to the "copy from sec storage" if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) { - throw new CloudRuntimeException("Template " + templateOnPrimary.getUniqueName() + " has not been downloaded to primary storage."); + throw new CloudRuntimeException(String.format("Template %s has not been downloaded to primary storage.", templateOnPrimary.getImage())); } try { @@ -1149,7 +1152,7 @@ private void createManagedVolumeCopyManagedTemplateAsync(VolumeInfo volumeInfo, VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), srcTemplateOnPrimary.getId(), null); if (templatePoolRef == null) { - throw new CloudRuntimeException("Failed to find template " + srcTemplateOnPrimary.getUniqueName() + " in storage pool " + srcTemplateOnPrimary.getId()); + throw new CloudRuntimeException(String.format("Failed to find template %s in storage pool %s", srcTemplateOnPrimary.getUniqueName(), srcTemplateOnPrimary)); } if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) { @@ -1162,7 +1165,7 @@ private void createManagedVolumeCopyManagedTemplateAsync(VolumeInfo volumeInfo, try { grantAccess(srcTemplateOnPrimary, destHost, destPrimaryDataStore); } catch (Exception e) { - throw new StorageAccessException("Unable to grant access to src template: " + srcTemplateOnPrimary.getId() + " on host: " + destHost.getId()); + throw new StorageAccessException(String.format("Unable to grant access to src template: %s on host: %s", srcTemplateOnPrimary, destHost)); } _volumeDetailsDao.addDetail(volumeInfo.getId(), volumeDetailKey, String.valueOf(templatePoolRef.getId()), false); @@ -1211,7 +1214,7 @@ private void createManagedVolumeCopyManagedTemplateAsync(VolumeInfo volumeInfo, try { destroyAndReallocateManagedVolume(volumeInfo); } catch (CloudRuntimeException ex) { - logger.warn("Failed to destroy managed volume: " + volumeInfo.getId()); + logger.warn("Failed to destroy managed volume: {}", volumeInfo); errMsg 
+= " : " + ex.getMessage(); } @@ -1248,21 +1251,21 @@ private void destroyAndReallocateManagedVolume(VolumeInfo volumeInfo) { VolumeVO newVolume = (VolumeVO) newVol; newVolume.set_iScsiName(null); volDao.update(newVolume.getId(), newVolume); - logger.debug("Allocated new volume: " + newVolume.getId() + " for the VM: " + volume.getInstanceId()); + logger.debug("Allocated new volume: {} for the VM: {}", newVolume::toString, () -> (volume.getInstanceId() != null ? vmDao.findById(volume.getInstanceId()) : null)); try { AsyncCallFuture expungeVolumeFuture = expungeVolumeAsync(volumeInfo); VolumeApiResult expungeVolumeResult = expungeVolumeFuture.get(); if (expungeVolumeResult.isFailed()) { - logger.warn("Failed to expunge volume: " + volumeInfo.getId() + " that was created"); - throw new CloudRuntimeException("Failed to expunge volume: " + volumeInfo.getId() + " that was created"); + logger.warn("Failed to expunge volume: {} that was created", volumeInfo); + throw new CloudRuntimeException(String.format("Failed to expunge volume: %s that was created", volumeInfo.getVolume())); } } catch (Exception ex) { if (canVolumeBeRemoved(volumeInfo.getId())) { volDao.remove(volumeInfo.getId()); } - logger.warn("Unable to expunge volume: " + volumeInfo.getId() + " due to: " + ex.getMessage()); - throw new CloudRuntimeException("Unable to expunge volume: " + volumeInfo.getId() + " due to: " + ex.getMessage()); + logger.warn("Unable to expunge volume: {} due to: {}", volumeInfo, ex.getMessage()); + throw new CloudRuntimeException(String.format("Unable to expunge volume: %s due to: %s", volumeInfo.getVolume(), ex.getMessage())); } } @@ -1382,12 +1385,14 @@ public TemplateInfo createManagedStorageTemplate(long srcTemplateId, long destDa templateOnPrimary = createManagedTemplateVolume(srcTemplateInfo, destPrimaryDataStore); if (templateOnPrimary == null) { - throw new CloudRuntimeException("Failed to create template " + srcTemplateInfo.getUniqueName() + " on primary storage: " + 
destDataStoreId); + throw new CloudRuntimeException(String.format("Failed to create template %s on primary storage: %s", + srcTemplateInfo.getImage(), destPrimaryDataStore)); } templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId(), null); if (templatePoolRef == null) { - throw new CloudRuntimeException("Failed to find template " + srcTemplateInfo.getUniqueName() + " in storage pool " + destPrimaryDataStore.getId()); + throw new CloudRuntimeException(String.format("Failed to find template %s in storage pool %s", + srcTemplateInfo.getImage(), destPrimaryDataStore)); } if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) { @@ -1407,7 +1412,7 @@ public TemplateInfo createManagedStorageTemplate(long srcTemplateId, long destDa try { grantAccess(templateOnPrimary, destHost, destPrimaryDataStore); } catch (Exception e) { - throw new StorageAccessException("Unable to grant access to template: " + templateOnPrimary.getId() + " on host: " + destHost.getId()); + throw new StorageAccessException(String.format("Unable to grant access to template: %s on host: %s", templateOnPrimary, destHost)); } templateOnPrimary.processEvent(Event.CopyingRequested); @@ -1416,8 +1421,8 @@ public TemplateInfo createManagedStorageTemplate(long srcTemplateId, long destDa //Download and copy template to the managed volume TemplateInfo templateOnPrimaryNow = tmplFactory.getReadyBypassedTemplateOnManagedStorage(srcTemplateId, templateOnPrimary, destDataStoreId, destHostId); if (templateOnPrimaryNow == null) { - logger.debug("Failed to prepare ready bypassed template: " + srcTemplateId + " on primary storage: " + templateOnPrimary.getId()); - throw new CloudRuntimeException("Failed to prepare ready bypassed template: " + srcTemplateId + " on primary storage: " + templateOnPrimary.getId()); + logger.debug("Failed to prepare ready bypassed template: {} on primary storage: {}", srcTemplateInfo, templateOnPrimary); + throw new 
CloudRuntimeException(String.format("Failed to prepare ready bypassed template: %s on primary storage: %s", srcTemplateInfo, templateOnPrimary)); } templateOnPrimary.processEvent(Event.OperationSuccessed); return templateOnPrimaryNow; @@ -1459,7 +1464,7 @@ public AsyncCallFuture createManagedStorageVolumeFromTemplateAs AsyncCallFuture future = new AsyncCallFuture<>(); if (storageCanCloneVolume && computeSupportsVolumeClone) { - logger.debug("Storage " + destDataStoreId + " can support cloning using a cached template and compute side is OK with volume cloning."); + logger.debug("Storage {} can support cloning using a cached template and compute side is OK with volume cloning.", destPrimaryDataStore); GlobalLock lock = null; TemplateInfo templateOnPrimary = null; @@ -1483,7 +1488,7 @@ public AsyncCallFuture createManagedStorageVolumeFromTemplateAs templateOnPrimary = createManagedTemplateVolume(srcTemplateInfo, destPrimaryDataStore); if (templateOnPrimary == null) { - throw new CloudRuntimeException("Failed to create template " + srcTemplateInfo.getUniqueName() + " on primary storage: " + destDataStoreId); + throw new CloudRuntimeException(String.format("Failed to create template %s on primary storage: %s", srcTemplateInfo, destPrimaryDataStore)); } } @@ -1491,7 +1496,7 @@ public AsyncCallFuture createManagedStorageVolumeFromTemplateAs VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId(), null); if (templatePoolRef == null) { - throw new CloudRuntimeException("Failed to find template " + srcTemplateInfo.getUniqueName() + " in storage pool " + destPrimaryDataStore.getId()); + throw new CloudRuntimeException(String.format("Failed to find template %s in storage pool %s", srcTemplateInfo, destPrimaryDataStore)); } if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) { @@ -1516,7 +1521,7 @@ public AsyncCallFuture createManagedStorageVolumeFromTemplateAs if 
(destPrimaryDataStore.getPoolType() != StoragePoolType.PowerFlex) { // We have a template on primary storage. Clone it to new volume. - logger.debug("Creating a clone from template on primary storage " + destDataStoreId); + logger.debug("Creating a clone from template on primary storage {}", destPrimaryDataStore); createManagedVolumeCloneTemplateAsync(volumeInfo, templateOnPrimary, destPrimaryDataStore, future); } else { @@ -1848,13 +1853,8 @@ public AsyncCallFuture copyVolume(VolumeInfo srcVolume, DataSto if (logger.isDebugEnabled()) { String srcRole = (srcStore != null && srcStore.getRole() != null ? srcVolume.getDataStore().getRole().toString() : ""); - String msg = String.format("copying %s(id=%d, role=%s) to %s (id=%d, role=%s)" - , srcVolume.getName() - , srcVolume.getId() - , srcRole - , destStore.getName() - , destStore.getId() - , destStore.getRole()); + String msg = String.format("copying %s (role=%s) to %s (role=%s)", + srcVolume, srcRole, destStore, destStore.getRole()); logger.debug(msg); } @@ -1917,7 +1917,7 @@ protected Void copyVolumeCallBack(AsyncCallbackDispatcher destroyFuture = expungeVolumeAsync(sourceVolume); VolumeApiResult volumeApiResult = destroyFuture.get(); + StoragePoolVO pool = storagePoolDao.findById(sourceVolumeVo.getPoolId()); if (volumeApiResult.isSuccess()) { - logger.debug(String.format("%s on storage [%s] was cleaned up successfully.", sourceVolumeVo.getVolumeDescription(), sourceVolumeVo.getPoolId())); + logger.debug("{} on storage [{}] was cleaned up successfully.", sourceVolumeVo, pool); return; } - String message = String.format("Failed to clean up %s on storage [%s] due to [%s].", sourceVolumeVo.getVolumeDescription(), sourceVolumeVo.getPoolId(), - volumeApiResult.getResult()); + String message = String.format("Failed to clean up %s on storage [%s] due to [%s].", + sourceVolumeVo, pool, volumeApiResult.getResult()); if (!retryExpungeVolumeAsync) { logger.warn(message); @@ -2059,7 +2060,7 @@ private AsyncCallFuture 
copyManagedVolume(VolumeInfo srcVolume, AsyncCallFuture createVolumeFuture = createVolumeAsync(destVolume, destStore); VolumeApiResult createVolumeResult = createVolumeFuture.get(); if (createVolumeResult.isFailed()) { - logger.debug("Failed to create dest volume " + destVolume.getId() + ", volume can be removed"); + logger.debug("Failed to create dest volume {}, volume can be removed", destVolume); destroyVolume(destVolume.getId()); destVolume.processEvent(Event.ExpungeRequested); destVolume.processEvent(Event.OperationSuccessed); @@ -2204,14 +2205,12 @@ private boolean requiresNewManagedVolumeInDestStore(PrimaryDataStore srcDataStor } if (StringUtils.isAnyEmpty(srcPoolSystemId, destPoolSystemId)) { - logger.warn("PowerFlex src pool: " + srcDataStore.getId() + " or dest pool: " + destDataStore.getId() + - " storage instance details are not available"); + logger.warn("PowerFlex src pool: {} or dest pool: {} storage instance details are not available", srcDataStore, destDataStore); return false; } if (!srcPoolSystemId.equals(destPoolSystemId)) { - logger.debug("PowerFlex src pool: " + srcDataStore.getId() + " and dest pool: " + destDataStore.getId() + - " belongs to different storage instances, create new managed volume"); + logger.debug("PowerFlex src pool: {} and dest pool: {} belongs to different storage instances, create new managed volume", srcDataStore, destDataStore); return true; } } @@ -2407,7 +2406,7 @@ public Pair registerVolumeForPostUpload(VolumeInfo volume, EndPoint ep = _epSelector.select(store); if (ep == null) { - String errorMessage = "There is no secondary storage VM for image store " + store.getName(); + String errorMessage = String.format("There is no secondary storage VM for image store %s", store); logger.warn(errorMessage); throw new CloudRuntimeException(errorMessage); } @@ -2439,7 +2438,7 @@ protected Void registerVolumeCallback(AsyncCallbackDispatcher listVolume(DataStore store) { return tanswer.getTemplateInfo(); } else { if 
(logger.isDebugEnabled()) { - logger.debug("Can not list volumes for image store " + store.getId()); + logger.debug("Can not list volumes for image store {}", store); } } @@ -2776,11 +2778,11 @@ public SnapshotInfo takeSnapshot(VolumeInfo volume) { try { snapshot = snapshotMgr.takeSnapshot(volume); } catch (CloudRuntimeException cre) { - logger.error("Take snapshot: " + volume.getId() + " failed", cre); + logger.error("Take snapshot: {} failed", volume, cre); throw cre; } catch (Exception e) { if (logger.isDebugEnabled()) { - logger.debug("unknown exception while taking snapshot for volume " + volume.getId() + " was caught", e); + logger.debug("unknown exception while taking snapshot for volume {} was caught", volume, e); } throw new CloudRuntimeException("Failed to take snapshot", e); } @@ -2793,7 +2795,7 @@ public void checkAndRepairVolumeBasedOnConfig(DataObject dataObject, Host host) if (HypervisorType.KVM.equals(host.getHypervisorType()) && DataObjectType.VOLUME.equals(dataObject.getType())) { VolumeInfo volumeInfo = volFactory.getVolume(dataObject.getId()); if (VolumeApiServiceImpl.AllowCheckAndRepairVolume.valueIn(volumeInfo.getPoolId())) { - logger.info(String.format("Trying to check and repair the volume %d", dataObject.getId())); + logger.info("Trying to check and repair the volume {}", dataObject); String repair = CheckAndRepairVolumeCmd.RepairValues.LEAKS.name().toLowerCase(); CheckAndRepairVolumePayload payload = new CheckAndRepairVolumePayload(repair); volumeInfo.addPayload(payload); @@ -2904,9 +2906,8 @@ public void moveVolumeOnSecondaryStorageToAnotherAccount(Volume volume, Account logger.debug(String.format("Volume [%s] is not present in the secondary storage. Therefore we do not need to move it in the secondary storage.", volume)); return; } - logger.debug(String.format("Volume [%s] is present in secondary storage. 
It will be necessary to move it from the source account's [%s] folder to the destination " - + "account's [%s] folder.", - volume.getUuid(), sourceAccount, destAccount)); + logger.debug("Volume [{}] is present in secondary storage. It will be necessary to move it from the source account's [{}] folder to the destination " + + "account's [{}] folder.", volume, sourceAccount, destAccount); VolumeInfo volumeInfo = volFactory.getVolume(volume.getId(), DataStoreRole.Image); String datastoreUri = volumeInfo.getDataStore().getUri(); @@ -2922,17 +2923,17 @@ public void moveVolumeOnSecondaryStorageToAnotherAccount(Volume volume, Account if (!answer.getResult()) { String msg = String.format("Unable to move volume [%s] from [%s] (source account's [%s] folder) to [%s] (destination account's [%s] folder) in the secondary storage, due " + "to [%s].", - volume.getUuid(), srcPath.getParent(), sourceAccount, destPath, destAccount, answer.getDetails()); + volume, srcPath.getParent(), sourceAccount, destPath, destAccount, answer.getDetails()); logger.error(msg); throw new CloudRuntimeException(msg); } - logger.debug(String.format("Volume [%s] was moved from [%s] (source account's [%s] folder) to [%s] (destination account's [%s] folder) in the secondary storage.", - volume.getUuid(), srcPath.getParent(), sourceAccount, destPath, destAccount)); + logger.debug("Volume [{}] was moved from [{}] (source account's [{}] folder) to [{}] (destination account's [{}] folder) in the secondary storage.", + volume, srcPath.getParent(), sourceAccount, destPath, destAccount); volumeStore.setInstallPath(String.format("%s/%s", destPath, srcPath.getFileName().toString())); if (!_volumeStoreDao.update(volumeStore.getId(), volumeStore)) { - String msg = String.format("Unable to update volume [%s] install path in the DB.", volumeStore.getVolumeId()); + String msg = String.format("Unable to update volume [%s] install path in the DB.", volume); logger.error(msg); throw new CloudRuntimeException(msg); } diff 
--git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobVO.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobVO.java index 6b85ae27f58a..36c0fb72c36c 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobVO.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobVO.java @@ -386,6 +386,7 @@ public void setRemoved(final Date removed) { public String toString() { StringBuffer sb = new StringBuffer(); sb.append("AsyncJobVO: {id:").append(getId()); + sb.append(", uuid: ").append(getUuid()); sb.append(", userId: ").append(getUserId()); sb.append(", accountId: ").append(getAccountId()); sb.append(", instanceType: ").append(getInstanceType()); From a639d3c47dc7c64a4ff47ee35ace15ff969154a2 Mon Sep 17 00:00:00 2001 From: Vishesh Date: Thu, 14 Nov 2024 18:17:05 +0530 Subject: [PATCH 12/22] Improve logging to include more identifiable information for server --- .../cloud/agent/api/to/LoadBalancerTO.java | 24 +- .../java/com/cloud/network/Ipv6Service.java | 2 +- .../com/cloud/network/NetworkProfile.java | 5 + .../cloud/network/lb/LoadBalancingRule.java | 4 + .../network/vpn/RemoteAccessVpnService.java | 2 +- .../ha/GlobalLoadBalancingRulesService.java | 3 +- .../command/user/vpn/RemoveVpnUserCmd.java | 2 +- .../cloudstack/vm/UnmanagedInstanceTO.java | 10 + .../agent/api/to/LoadBalancerTOTest.java | 20 +- .../configuration/ConfigurationManager.java | 4 +- .../com/cloud/network/IpAddressManager.java | 5 +- .../network/lb/LoadBalancingRulesManager.java | 4 +- .../cloud/network/rules/FirewallManager.java | 6 +- .../security/SecurityGroupManager.java | 5 +- .../com/cloud/network/vpc/VpcManager.java | 3 + .../com/cloud/resource/ResourceManager.java | 2 +- .../com/cloud/template/TemplateManager.java | 4 +- .../com/cloud/vm/VmWorkJobHandlerProxy.java | 2 - .../orchestration/NetworkOrchestrator.java | 35 +- .../orchestration/VolumeOrchestrator.java | 
4 +- .../src/main/java/com/cloud/dc/VlanVO.java | 7 +- .../main/java/com/cloud/domain/DomainVO.java | 5 +- .../cloud/network/LBHealthCheckPolicyVO.java | 8 + .../cloud/network/as/AutoScalePolicyVO.java | 5 +- .../cloud/network/as/AutoScaleVmGroupVO.java | 1 + .../network/as/AutoScaleVmProfileVO.java | 5 +- .../com/cloud/network/as/ConditionVO.java | 5 +- .../java/com/cloud/network/as/CounterVO.java | 5 +- .../dao/ExternalLoadBalancerDeviceVO.java | 8 + .../com/cloud/network/dao/IPAddressVO.java | 5 +- .../network/dao/LBStickinessPolicyVO.java | 8 + .../com/cloud/network/dao/LoadBalancerVO.java | 8 + .../dao/PhysicalNetworkServiceProviderVO.java | 8 + .../cloud/network/dao/PhysicalNetworkVO.java | 8 + .../cloud/network/dao/RemoteAccessVpnVO.java | 6 + .../dao/Site2SiteCustomerGatewayVO.java | 8 + .../network/dao/Site2SiteVpnGatewayVO.java | 8 + .../network/security/SecurityGroupRuleVO.java | 9 + .../network/security/SecurityGroupVO.java | 9 + .../cloud/network/vpc/NetworkACLItemVO.java | 5 +- .../com/cloud/network/vpc/NetworkACLVO.java | 2 +- .../com/cloud/network/vpc/VpcGatewayVO.java | 7 +- .../com/cloud/network/vpc/VpcOfferingVO.java | 6 +- .../cloud/projects/ProjectInvitationVO.java | 7 +- .../java/com/cloud/projects/ProjectVO.java | 4 +- .../com/cloud/storage/DiskOfferingVO.java | 8 + .../com/cloud/storage/SnapshotPolicyVO.java | 8 + .../com/cloud/storage/SnapshotScheduleVO.java | 15 +- .../java/com/cloud/user/UserAccountVO.java | 7 + .../src/main/java/com/cloud/user/UserVO.java | 2 +- .../java/com/cloud/vm/InstanceGroupVO.java | 7 + .../com/cloud/vm/dao/NicSecondaryIpVO.java | 9 + .../org/apache/cloudstack/acl/RoleVO.java | 2 +- .../cloudstack/backup/BackupScheduleVO.java | 7 + .../apache/cloudstack/backup/BackupVO.java | 7 + .../download/DirectDownloadCertificateVO.java | 8 + .../lb/ApplicationLoadBalancerRuleVO.java | 8 + .../region/gslb/GlobalLoadBalancerRuleVO.java | 10 + ...NonManagedStorageSystemDataMotionTest.java | 1 - 
.../storage/volume/VolumeServiceImpl.java | 2 +- .../storage/volume/VolumeServiceTest.java | 6 + .../network/element/CiscoVnmcElement.java | 70 ++- .../network/lb/LoadBalanceRuleHandler.java | 2 +- .../cloud/acl/AffinityGroupAccessChecker.java | 4 +- .../java/com/cloud/acl/DomainChecker.java | 24 +- .../allocator/impl/FirstFitAllocator.java | 15 +- .../allocator/impl/RecreateHostAllocator.java | 2 +- .../impl/UserConcentratedAllocator.java | 25 +- .../com/cloud/alert/ClusterAlertAdapter.java | 10 +- .../main/java/com/cloud/api/ApiServer.java | 10 +- .../com/cloud/api/query/QueryManagerImpl.java | 8 +- .../api/query/vo/NetworkOfferingJoinVO.java | 7 + .../ConfigurationManagerImpl.java | 227 ++++----- .../AgentBasedConsoleProxyManager.java | 16 +- ...entBasedStandaloneConsoleProxyManager.java | 22 +- .../consoleproxy/ConsoleProxyManagerImpl.java | 77 ++- .../consoleproxy/ConsoleProxyService.java | 3 +- .../StaticConsoleProxyManager.java | 2 +- .../com/cloud/dc/DedicatedResourceVO.java | 8 + .../deploy/DeploymentPlanningManagerImpl.java | 2 +- .../com/cloud/deploy/FirstFitPlanner.java | 8 +- .../cloud/ha/AbstractInvestigatorImpl.java | 15 +- .../cloud/ha/HighAvailabilityManagerImpl.java | 58 +-- .../ha/ManagementIPSystemVMInvestigator.java | 8 +- .../com/cloud/ha/UserVmDomRInvestigator.java | 27 +- .../CloudZonesStartupProcessor.java | 4 +- .../cloud/hypervisor/HypervisorGuruBase.java | 2 +- .../java/com/cloud/hypervisor/KVMGuru.java | 15 +- .../hypervisor/kvm/dpdk/DpdkHelperImpl.java | 7 +- .../ExternalDeviceUsageManagerImpl.java | 10 +- .../ExternalFirewallDeviceManagerImpl.java | 25 +- ...ExternalLoadBalancerDeviceManagerImpl.java | 52 +- .../cloud/network/IpAddressManagerImpl.java | 110 ++-- .../cloud/network/Ipv6AddressManagerImpl.java | 15 +- .../com/cloud/network/Ipv6ServiceImpl.java | 37 +- .../network/NetworkMigrationManagerImpl.java | 14 +- .../com/cloud/network/NetworkModelImpl.java | 62 ++- .../com/cloud/network/NetworkServiceImpl.java | 129 +++-- 
.../cloud/network/SshKeysDistriMonitor.java | 11 +- .../cloud/network/as/AutoScaleManager.java | 5 +- .../network/as/AutoScaleManagerImpl.java | 220 ++++---- .../element/ConfigDriveNetworkElement.java | 31 +- .../network/element/VirtualRouterElement.java | 25 +- .../element/VpcVirtualRouterElement.java | 39 +- .../network/firewall/FirewallManagerImpl.java | 48 +- .../cloud/network/guru/DirectNetworkGuru.java | 6 +- .../guru/DirectPodBasedNetworkGuru.java | 6 +- .../cloud/network/guru/GuestNetworkGuru.java | 13 +- .../lb/LoadBalancingRulesManagerImpl.java | 177 ++++--- .../network/router/CommandSetupHelper.java | 2 +- .../network/router/NetworkHelperImpl.java | 49 +- ...VpcVirtualNetworkApplianceManagerImpl.java | 18 +- .../cloud/network/rules/DhcpSubNetRules.java | 2 +- .../network/rules/PrivateGatewayRules.java | 2 +- .../cloud/network/rules/RulesManagerImpl.java | 83 ++- .../network/rules/VpcIpAssociationRules.java | 4 +- .../security/SecurityGroupManagerImpl.java | 80 +-- .../security/SecurityGroupManagerImpl2.java | 11 +- .../network/vpc/NetworkACLManagerImpl.java | 24 +- .../network/vpc/NetworkACLServiceImpl.java | 18 +- .../com/cloud/network/vpc/VpcManagerImpl.java | 100 ++-- .../VpcPrivateGatewayTransactionCallable.java | 5 +- .../vpn/RemoteAccessVpnManagerImpl.java | 21 +- .../network/vpn/Site2SiteVpnManagerImpl.java | 26 +- .../com/cloud/projects/ProjectManager.java | 2 +- .../cloud/projects/ProjectManagerImpl.java | 73 ++- .../cloud/resource/ResourceManagerImpl.java | 148 +++--- .../resourceicon/ResourceIconManagerImpl.java | 4 +- .../ResourceLimitManagerImpl.java | 7 +- .../cloud/server/ConfigurationServerImpl.java | 4 +- .../cloud/server/ManagementServerImpl.java | 20 +- .../java/com/cloud/server/StatsCollector.java | 29 +- .../cloud/servlet/ConsoleProxyServlet.java | 26 +- .../cloud/storage/ImageStoreServiceImpl.java | 6 +- .../com/cloud/storage/OCFS2ManagerImpl.java | 10 +- .../com/cloud/storage/StorageManagerImpl.java | 2 +- 
.../storage/StoragePoolAutomationImpl.java | 7 +- .../cloud/storage/VolumeApiServiceImpl.java | 138 +++-- .../storage/download/DownloadListener.java | 4 +- .../storage/download/DownloadMonitorImpl.java | 6 +- .../storage/snapshot/SnapshotManager.java | 3 +- .../storage/snapshot/SnapshotManagerImpl.java | 114 ++--- .../snapshot/SnapshotSchedulerImpl.java | 42 +- .../cloud/storage/upload/UploadListener.java | 4 +- .../storage/upload/UploadMonitorImpl.java | 10 +- .../cloud/tags/TaggedResourceManagerImpl.java | 16 +- .../template/HypervisorTemplateAdapter.java | 38 +- .../cloud/template/TemplateAdapterBase.java | 2 +- .../cloud/template/TemplateManagerImpl.java | 93 ++-- .../com/cloud/usage/UsageServiceImpl.java | 2 +- .../com/cloud/user/AccountManagerImpl.java | 229 ++++----- .../com/cloud/user/DomainManagerImpl.java | 47 +- .../java/com/cloud/vm/UserVmManagerImpl.java | 482 +++++++++--------- .../vm/snapshot/VMSnapshotManagerImpl.java | 46 +- .../acl/ProjectRoleManagerImpl.java | 2 +- .../cloudstack/acl/RoleManagerImpl.java | 4 +- .../affinity/AffinityGroupServiceImpl.java | 8 +- .../agent/lb/IndirectAgentLBServiceImpl.java | 6 +- .../cloudstack/backup/BackupManagerImpl.java | 40 +- .../apache/cloudstack/ca/CAManagerImpl.java | 8 +- .../ConsoleAccessManagerImpl.java | 29 +- .../diagnostics/DiagnosticsServiceImpl.java | 18 +- .../download/DirectDownloadManagerImpl.java | 91 ++-- .../apache/cloudstack/ha/HAManagerImpl.java | 10 +- .../provider/host/HAAbstractHostProvider.java | 2 +- .../apache/cloudstack/ha/task/BaseHATask.java | 2 +- .../ApplicationLoadBalancerManagerImpl.java | 5 +- .../RouterDeploymentDefinition.java | 12 +- .../VpcRouterDeploymentDefinition.java | 8 +- .../topology/AdvancedNetworkTopology.java | 10 +- .../topology/BasicNetworkTopology.java | 22 +- .../OutOfBandManagementServiceImpl.java | 14 +- .../PowerOperationTask.java | 3 +- .../GlobalLoadBalancingRulesServiceImpl.java | 19 +- .../cloudstack/snapshot/SnapshotHelper.java | 25 +- 
.../vm/UnmanagedVMsManagerImpl.java | 111 ++-- .../ConfigurationManagerTest.java | 2 +- .../ha/HighAvailabilityManagerImplTest.java | 2 - .../cloud/network/Ipv6ServiceImplTest.java | 37 +- .../network/MockFirewallManagerImpl.java | 4 +- .../cloud/network/NetworkServiceImplTest.java | 2 +- .../network/as/AutoScaleManagerImplTest.java | 46 +- .../element/VpcVirtualRouterElementTest.java | 6 +- .../projects/MockProjectManagerImpl.java | 2 +- .../resource/MockResourceManagerImpl.java | 2 +- .../storage/VolumeApiServiceImplTest.java | 3 - .../cloud/user/AccountManagerImplTest.java | 7 +- .../com/cloud/user/DomainManagerImplTest.java | 6 +- .../com/cloud/vm/UserVmManagerImplTest.java | 2 +- .../java/com/cloud/vm/UserVmManagerTest.java | 1 - .../vm/snapshot/VMSnapshotManagerTest.java | 8 +- .../vpc/MockConfigurationManagerImpl.java | 4 +- .../RouterDeploymentDefinitionTest.java | 6 + 193 files changed, 2537 insertions(+), 2234 deletions(-) diff --git a/api/src/main/java/com/cloud/agent/api/to/LoadBalancerTO.java b/api/src/main/java/com/cloud/agent/api/to/LoadBalancerTO.java index 966d24886fef..f395f26aeed6 100644 --- a/api/src/main/java/com/cloud/agent/api/to/LoadBalancerTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/LoadBalancerTO.java @@ -374,13 +374,15 @@ public String getMonitorState() { public static class CounterTO implements Serializable { private static final long serialVersionUID = 2L; private final Long id; + private final String uuid; private final String name; private final Counter.Source source; private final String value; private final String provider; - public CounterTO(Long id, String name, Counter.Source source, String value, String provider) { + public CounterTO(Long id, String uuid, String name, Counter.Source source, String value, String provider) { this.id = id; + this.uuid = uuid; this.name = name; this.source = source; this.value = value; @@ -391,6 +393,10 @@ public Long getId() { return id; } + public String getUuid() { + return uuid; + } + 
public String getName() { return name; } @@ -411,12 +417,14 @@ public String getProvider() { public static class ConditionTO implements Serializable { private static final long serialVersionUID = 2L; private final Long id; + private final String uuid; private final long threshold; private final Condition.Operator relationalOperator; private final CounterTO counter; - public ConditionTO(Long id, long threshold, Condition.Operator relationalOperator, CounterTO counter) { + public ConditionTO(Long id, String uuid, long threshold, Condition.Operator relationalOperator, CounterTO counter) { this.id = id; + this.uuid = uuid; this.threshold = threshold; this.relationalOperator = relationalOperator; this.counter = counter; @@ -426,6 +434,10 @@ public Long getId() { return id; } + public String getUuid() { + return uuid; + } + public long getThreshold() { return threshold; } @@ -442,6 +454,7 @@ public CounterTO getCounter() { public static class AutoScalePolicyTO implements Serializable { private static final long serialVersionUID = 2L; private final long id; + private final String uuid; private final int duration; private final int quietTime; private final Date lastQuietTime; @@ -449,8 +462,9 @@ public static class AutoScalePolicyTO implements Serializable { boolean revoked; private final List conditions; - public AutoScalePolicyTO(long id, int duration, int quietTime, Date lastQuietTime, AutoScalePolicy.Action action, List conditions, boolean revoked) { + public AutoScalePolicyTO(long id, String uuid, int duration, int quietTime, Date lastQuietTime, AutoScalePolicy.Action action, List conditions, boolean revoked) { this.id = id; + this.uuid = uuid; this.duration = duration; this.quietTime = quietTime; this.lastQuietTime = lastQuietTime; @@ -463,6 +477,10 @@ public long getId() { return id; } + public String getUuid() { + return uuid; + } + public int getDuration() { return duration; } diff --git a/api/src/main/java/com/cloud/network/Ipv6Service.java 
b/api/src/main/java/com/cloud/network/Ipv6Service.java index 2b4dff01086e..4ef5f98c38d8 100644 --- a/api/src/main/java/com/cloud/network/Ipv6Service.java +++ b/api/src/main/java/com/cloud/network/Ipv6Service.java @@ -58,7 +58,7 @@ public interface Ipv6Service extends PluggableService, Configurable { Pair getUsedTotalIpv6SubnetForZone(long zoneId); - Pair preAllocateIpv6SubnetForNetwork(long zoneId) throws ResourceAllocationException; + Pair preAllocateIpv6SubnetForNetwork(DataCenter zone) throws ResourceAllocationException; void assignIpv6SubnetToNetwork(String subnet, long networkId); diff --git a/api/src/main/java/com/cloud/network/NetworkProfile.java b/api/src/main/java/com/cloud/network/NetworkProfile.java index 83dc247cc9ee..641c67a39daf 100644 --- a/api/src/main/java/com/cloud/network/NetworkProfile.java +++ b/api/src/main/java/com/cloud/network/NetworkProfile.java @@ -384,4 +384,9 @@ public Integer getNetworkCidrSize() { return networkCidrSize; } + @Override + public String toString() { + return String.format("NetworkProfile {\"id\": %s, \"name\": \"%s\", \"uuid\": \"%s\", \"networkofferingid\": %d}", id, name, uuid, networkOfferingId); + } + } diff --git a/api/src/main/java/com/cloud/network/lb/LoadBalancingRule.java b/api/src/main/java/com/cloud/network/lb/LoadBalancingRule.java index 64b2aeedf128..e4cf4ec526f0 100644 --- a/api/src/main/java/com/cloud/network/lb/LoadBalancingRule.java +++ b/api/src/main/java/com/cloud/network/lb/LoadBalancingRule.java @@ -63,6 +63,10 @@ public long getId() { return lb.getId(); } + public LoadBalancer getLb() { + return lb; + } + public String getName() { return lb.getName(); } diff --git a/api/src/main/java/com/cloud/network/vpn/RemoteAccessVpnService.java b/api/src/main/java/com/cloud/network/vpn/RemoteAccessVpnService.java index bbb9771d27aa..ffa8af4576d7 100644 --- a/api/src/main/java/com/cloud/network/vpn/RemoteAccessVpnService.java +++ b/api/src/main/java/com/cloud/network/vpn/RemoteAccessVpnService.java @@ -39,7 
+39,7 @@ public interface RemoteAccessVpnService { VpnUser addVpnUser(long vpnOwnerId, String userName, String password); - boolean removeVpnUser(long vpnOwnerId, String userName, Account caller); + boolean removeVpnUser(Account vpnOwner, String userName, Account caller); List listVpnUsers(long vpnOwnerId, String userName); diff --git a/api/src/main/java/com/cloud/region/ha/GlobalLoadBalancingRulesService.java b/api/src/main/java/com/cloud/region/ha/GlobalLoadBalancingRulesService.java index ab6e6fb6c5ae..3b61367e3b4d 100644 --- a/api/src/main/java/com/cloud/region/ha/GlobalLoadBalancingRulesService.java +++ b/api/src/main/java/com/cloud/region/ha/GlobalLoadBalancingRulesService.java @@ -19,6 +19,7 @@ import java.util.List; +import com.cloud.user.Account; import org.apache.cloudstack.api.command.user.region.ha.gslb.AssignToGlobalLoadBalancerRuleCmd; import org.apache.cloudstack.api.command.user.region.ha.gslb.CreateGlobalLoadBalancerRuleCmd; import org.apache.cloudstack.api.command.user.region.ha.gslb.DeleteGlobalLoadBalancerRuleCmd; @@ -39,7 +40,7 @@ public interface GlobalLoadBalancingRulesService { GlobalLoadBalancerRule updateGlobalLoadBalancerRule(UpdateGlobalLoadBalancerRuleCmd updateGslbCmd); - boolean revokeAllGslbRulesForAccount(com.cloud.user.Account caller, long accountId) throws com.cloud.exception.ResourceUnavailableException; + boolean revokeAllGslbRulesForAccount(com.cloud.user.Account caller, Account account) throws com.cloud.exception.ResourceUnavailableException; /* * methods for managing sites participating in global load balancing diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/RemoveVpnUserCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/RemoveVpnUserCmd.java index 48e7a9ee5193..0697987b04de 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/RemoveVpnUserCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/RemoveVpnUserCmd.java @@ -104,7 +104,7 @@ 
public String getEventType() { public void execute() { Account owner = _accountService.getAccount(getEntityOwnerId()); long ownerId = owner.getId(); - boolean result = _ravService.removeVpnUser(ownerId, userName, CallContext.current().getCallingAccount()); + boolean result = _ravService.removeVpnUser(owner, userName, CallContext.current().getCallingAccount()); if (!result) { String errorMessage = String.format("Failed to remove VPN user=[%s]. VPN owner id=[%s].", userName, ownerId); logger.error(errorMessage); diff --git a/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java b/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java index 5697a040b811..95904483391c 100644 --- a/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java +++ b/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java @@ -179,6 +179,16 @@ public void setVncPassword(String vncPassword) { this.vncPassword = vncPassword; } + @Override + public String toString() { + return "UnmanagedInstanceTO{" + + "name='" + name + '\'' + + ", internalCSName='" + internalCSName + '\'' + + ", hostName='" + hostName + '\'' + + ", clusterName='" + clusterName + '\'' + + '}'; + } + public static class Disk { private String diskId; diff --git a/api/src/test/java/com/cloud/agent/api/to/LoadBalancerTOTest.java b/api/src/test/java/com/cloud/agent/api/to/LoadBalancerTOTest.java index b12c1b81d4a3..8fef19ef3b62 100644 --- a/api/src/test/java/com/cloud/agent/api/to/LoadBalancerTOTest.java +++ b/api/src/test/java/com/cloud/agent/api/to/LoadBalancerTOTest.java @@ -41,16 +41,19 @@ public class LoadBalancerTOTest { LoadBalancerTO.AutoScaleVmGroupTO vmGroup; private static final Long counterId = 1L; + private static final String counterUiid = "1111-1111-1100"; private static final String counterName = "counter name"; private static final Counter.Source counterSource = Counter.Source.CPU; private static final String counterValue = "counter value"; private static final String 
counterProvider = "VIRTUALROUTER"; private static final Long conditionId = 2L; + private static final String conditionUiid = "1111-1111-1110"; private static final Long threshold = 100L; private static final Condition.Operator relationalOperator = Condition.Operator.GT; private static final Long scaleUpPolicyId = 11L; + private static final String scaleUpPolicyUiid = "1111-1111-1111"; private static final int scaleUpPolicyDuration = 61; private static final int scaleUpPolicyQuietTime = 31; private static final Date scaleUpPolicyLastQuietTime = new Date(); @@ -85,14 +88,14 @@ public class LoadBalancerTOTest { @Before public void setUp() { - counter = new LoadBalancerTO.CounterTO(counterId, counterName, counterSource, counterValue, counterProvider); - condition = new LoadBalancerTO.ConditionTO(conditionId, threshold, relationalOperator, counter); - scaleUpPolicy = new LoadBalancerTO.AutoScalePolicyTO(scaleUpPolicyId, scaleUpPolicyDuration, scaleUpPolicyQuietTime, - scaleUpPolicyLastQuietTime, AutoScalePolicy.Action.SCALEUP, - Arrays.asList(new LoadBalancerTO.ConditionTO[]{ condition }), false); - scaleDownPolicy = new LoadBalancerTO.AutoScalePolicyTO(scaleDownPolicyId, scaleDownPolicyDuration, scaleDownPolicyQuietTime, - scaleDownPolicyLastQuietTime, AutoScalePolicy.Action.SCALEDOWN, - Arrays.asList(new LoadBalancerTO.ConditionTO[]{ condition }), false); + counter = new LoadBalancerTO.CounterTO(counterId, counterUiid, counterName, counterSource, counterValue, counterProvider); + condition = new LoadBalancerTO.ConditionTO(conditionId, conditionUiid, threshold, relationalOperator, counter); + scaleUpPolicy = new LoadBalancerTO.AutoScalePolicyTO(scaleUpPolicyId, scaleUpPolicyUiid, scaleUpPolicyDuration, + scaleUpPolicyQuietTime, scaleUpPolicyLastQuietTime, + AutoScalePolicy.Action.SCALEUP, Arrays.asList(new LoadBalancerTO.ConditionTO[]{ condition }), false); + scaleDownPolicy = new LoadBalancerTO.AutoScalePolicyTO(scaleDownPolicyId, scaleUpPolicyUiid, 
scaleDownPolicyDuration, + scaleDownPolicyQuietTime, scaleDownPolicyLastQuietTime, + AutoScalePolicy.Action.SCALEDOWN, Arrays.asList(new LoadBalancerTO.ConditionTO[]{ condition }), false); vmProfile = new LoadBalancerTO.AutoScaleVmProfileTO(zoneId, domainId, cloudStackApiUrl, autoScaleUserApiKey, autoScaleUserSecretKey, serviceOfferingId, templateId, vmName, networkId, otherDeployParams, counterParamList, expungeVmGracePeriod); @@ -113,6 +116,7 @@ public void testCounterTO() { @Test public void testConditionTO() { Assert.assertEquals(conditionId, condition.getId()); + Assert.assertEquals(conditionUiid, condition.getUuid()); Assert.assertEquals((long) threshold, condition.getThreshold()); Assert.assertEquals(relationalOperator, condition.getRelationalOperator()); Assert.assertEquals(counter, condition.getCounter()); diff --git a/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java b/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java index 01fd54430d6e..1694b19c33fd 100644 --- a/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java +++ b/engine/components-api/src/main/java/com/cloud/configuration/ConfigurationManager.java @@ -238,7 +238,7 @@ Vlan createVlanAndPublicIpRange(long zoneId, long networkId, long physicalNetwor * @param domainId * @return success/failure */ - boolean releaseDomainSpecificVirtualRanges(long domainId); + boolean releaseDomainSpecificVirtualRanges(Domain domain); /** * Release dedicated virtual ip ranges of an account. @@ -246,7 +246,7 @@ Vlan createVlanAndPublicIpRange(long zoneId, long networkId, long physicalNetwor * @param accountId * @return success/failure */ - boolean releaseAccountSpecificVirtualRanges(long accountId); + boolean releaseAccountSpecificVirtualRanges(Account account); /** * Edits a pod in the database. Will not allow you to edit pods that are being used anywhere in the system. 
diff --git a/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java b/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java index 36937460b205..b1cad20b19ec 100644 --- a/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java +++ b/engine/components-api/src/main/java/com/cloud/network/IpAddressManager.java @@ -19,6 +19,7 @@ import java.util.Date; import java.util.List; +import com.cloud.user.User; import org.apache.cloudstack.api.response.AcquirePodIpCmdResponse; import org.apache.cloudstack.framework.config.ConfigKey; @@ -88,7 +89,7 @@ PublicIp assignSourceNatPublicIpAddress(long dcId, Long podId, Account owner, Vl * @param caller * @return true if it did; false if it didn't */ - boolean disassociatePublicIpAddress(long id, long userId, Account caller); + boolean disassociatePublicIpAddress(IpAddress ipAddress, long userId, Account caller); boolean applyRules(List rules, FirewallRule.Purpose purpose, NetworkRuleApplier applier, boolean continueOnError) throws ResourceUnavailableException; @@ -191,7 +192,7 @@ void transferPortableIP(long ipAddrId, long currentNetworkId, long newNetworkId) PublicIp assignDedicateIpAddress(Account owner, Long guestNtwkId, Long vpcId, long dcId, boolean isSourceNat) throws ConcurrentOperationException, InsufficientAddressCapacityException; - IpAddress allocateIp(Account ipOwner, boolean isSystem, Account caller, long callerId, DataCenter zone, Boolean displayIp, String ipaddress) + IpAddress allocateIp(Account ipOwner, boolean isSystem, Account caller, User callerId, DataCenter zone, Boolean displayIp, String ipaddress) throws ConcurrentOperationException, ResourceAllocationException, InsufficientAddressCapacityException; PublicIp assignPublicIpAddressFromVlans(long dcId, Long podId, Account owner, VlanType type, List vlanDbIds, Long networkId, String requestedIp, String requestedGateway, boolean isSystem) diff --git 
a/engine/components-api/src/main/java/com/cloud/network/lb/LoadBalancingRulesManager.java b/engine/components-api/src/main/java/com/cloud/network/lb/LoadBalancingRulesManager.java index d61b446cad75..669456cbdcc2 100644 --- a/engine/components-api/src/main/java/com/cloud/network/lb/LoadBalancingRulesManager.java +++ b/engine/components-api/src/main/java/com/cloud/network/lb/LoadBalancingRulesManager.java @@ -62,7 +62,7 @@ LoadBalancer createPublicLoadBalancer(String xId, String name, String descriptio */ boolean removeVmFromLoadBalancers(long vmId); - boolean applyLoadBalancersForNetwork(long networkId, Scheme scheme) throws ResourceUnavailableException; + boolean applyLoadBalancersForNetwork(Network network, Scheme scheme) throws ResourceUnavailableException; String getLBCapability(long networkid, String capabilityName); @@ -74,7 +74,7 @@ LoadBalancer createPublicLoadBalancer(String xId, String name, String descriptio boolean configureLbAutoScaleVmGroup(long vmGroupid, AutoScaleVmGroup.State currentState) throws ResourceUnavailableException; - boolean revokeLoadBalancersForNetwork(long networkId, Scheme scheme) throws ResourceUnavailableException; + boolean revokeLoadBalancersForNetwork(Network network, Scheme scheme) throws ResourceUnavailableException; boolean validateLbRule(LoadBalancingRule lbRule); diff --git a/engine/components-api/src/main/java/com/cloud/network/rules/FirewallManager.java b/engine/components-api/src/main/java/com/cloud/network/rules/FirewallManager.java index 0471086c43d5..1a79135f25ec 100644 --- a/engine/components-api/src/main/java/com/cloud/network/rules/FirewallManager.java +++ b/engine/components-api/src/main/java/com/cloud/network/rules/FirewallManager.java @@ -20,6 +20,8 @@ import com.cloud.exception.NetworkRuleConflictException; import com.cloud.exception.ResourceUnavailableException; +import com.cloud.network.IpAddress; +import com.cloud.network.Network; import com.cloud.network.dao.IPAddressVO; import 
com.cloud.network.firewall.FirewallService; import com.cloud.network.rules.FirewallRule.FirewallRuleType; @@ -53,7 +55,7 @@ void validateFirewallRule(Account caller, IPAddressVO ipAddress, Integer portSta public void revokeRule(FirewallRuleVO rule, Account caller, long userId, boolean needUsageEvent); - boolean revokeFirewallRulesForIp(long ipId, long userId, Account caller) throws ResourceUnavailableException; + boolean revokeFirewallRulesForIp(IpAddress ip, long userId, Account caller) throws ResourceUnavailableException; // /** // * Revokes a firewall rule @@ -75,7 +77,7 @@ void validateFirewallRule(Account caller, IPAddressVO ipAddress, Integer portSta FirewallRule createRuleForAllCidrs(long ipAddrId, Account caller, Integer startPort, Integer endPort, String protocol, Integer icmpCode, Integer icmpType, Long relatedRuleId, long networkId) throws NetworkRuleConflictException; - boolean revokeAllFirewallRulesForNetwork(long networkId, long userId, Account caller) throws ResourceUnavailableException; + boolean revokeAllFirewallRulesForNetwork(Network network, long userId, Account caller) throws ResourceUnavailableException; boolean revokeFirewallRulesForVm(long vmId); diff --git a/engine/components-api/src/main/java/com/cloud/network/security/SecurityGroupManager.java b/engine/components-api/src/main/java/com/cloud/network/security/SecurityGroupManager.java index ffca4bb013b6..6e2270ffb108 100644 --- a/engine/components-api/src/main/java/com/cloud/network/security/SecurityGroupManager.java +++ b/engine/components-api/src/main/java/com/cloud/network/security/SecurityGroupManager.java @@ -19,6 +19,7 @@ import java.util.HashMap; import java.util.List; +import com.cloud.uservm.UserVm; import com.cloud.utils.Pair; /** @@ -36,9 +37,9 @@ public interface SecurityGroupManager { public SecurityGroupVO createDefaultSecurityGroup(Long accountId); - public boolean addInstanceToGroups(Long userVmId, List groups); + public boolean addInstanceToGroups(UserVm userVm, List 
groups); - public void removeInstanceFromGroups(long userVmId); + public void removeInstanceFromGroups(UserVm userVm); public void fullSync(long agentId, HashMap> newGroupStates); diff --git a/engine/components-api/src/main/java/com/cloud/network/vpc/VpcManager.java b/engine/components-api/src/main/java/com/cloud/network/vpc/VpcManager.java index a340f49c13f5..15158b72fab8 100644 --- a/engine/components-api/src/main/java/com/cloud/network/vpc/VpcManager.java +++ b/engine/components-api/src/main/java/com/cloud/network/vpc/VpcManager.java @@ -20,6 +20,7 @@ import java.util.Map; import java.util.Set; +import com.cloud.network.dao.IPAddressVO; import com.cloud.utils.Pair; import org.apache.cloudstack.acl.ControlledEntity.ACLType; @@ -82,6 +83,8 @@ public interface VpcManager { */ void unassignIPFromVpcNetwork(long ipId, long networkId); + void unassignIPFromVpcNetwork(final IPAddressVO ip, final Network network); + /** * Creates guest network in the VPC * diff --git a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java index b2ae8b898378..343ad0fa2127 100755 --- a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java +++ b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java @@ -185,7 +185,7 @@ public interface ResourceManager extends ResourceService, Configurable { * @param vgpuType the VGPU type * @return true when the host has the capacity with given VGPU type */ - boolean isGPUDeviceAvailable(long hostId, String groupName, String vgpuType); + boolean isGPUDeviceAvailable(Host host, String groupName, String vgpuType); /** * Get available GPU device diff --git a/engine/components-api/src/main/java/com/cloud/template/TemplateManager.java b/engine/components-api/src/main/java/com/cloud/template/TemplateManager.java index 997ae3985f18..b8912526fdf2 100644 --- 
a/engine/components-api/src/main/java/com/cloud/template/TemplateManager.java +++ b/engine/components-api/src/main/java/com/cloud/template/TemplateManager.java @@ -120,7 +120,7 @@ public interface TemplateManager { DataStore getImageStore(long tmpltId); - Long getTemplateSize(long templateId, long zoneId); + Long getTemplateSize(VirtualMachineTemplate template, long zoneId); DataStore getImageStore(String storeUuid, Long zoneId, VolumeVO volume); @@ -143,7 +143,7 @@ public interface TemplateManager { TemplateType validateTemplateType(BaseCmd cmd, boolean isAdmin, boolean isCrossZones); - List getTemplateDisksOnImageStore(Long templateId, DataStoreRole role, String configurationId); + List getTemplateDisksOnImageStore(VirtualMachineTemplate template, DataStoreRole role, String configurationId); static Boolean getValidateUrlIsResolvableBeforeRegisteringTemplateValue() { return ValidateUrlIsResolvableBeforeRegisteringTemplate.value(); diff --git a/engine/components-api/src/main/java/com/cloud/vm/VmWorkJobHandlerProxy.java b/engine/components-api/src/main/java/com/cloud/vm/VmWorkJobHandlerProxy.java index 37683aa3758e..ef1c71e6b014 100644 --- a/engine/components-api/src/main/java/com/cloud/vm/VmWorkJobHandlerProxy.java +++ b/engine/components-api/src/main/java/com/cloud/vm/VmWorkJobHandlerProxy.java @@ -26,9 +26,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import com.cloud.serializer.GsonHelper; import com.cloud.utils.Pair; -import com.google.gson.Gson; /** * VmWorkJobHandlerProxy can not be used as standalone due to run-time diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java index 66b19205c6bd..7270742c1364 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java +++ 
b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java @@ -1786,13 +1786,13 @@ protected boolean reprogramNetworkRules(final long networkId, final Account call } // apply public load balancer rules - if (!_lbMgr.applyLoadBalancersForNetwork(networkId, Scheme.Public)) { + if (!_lbMgr.applyLoadBalancersForNetwork(network, Scheme.Public)) { logger.warn("Failed to reapply Public load balancer rules as a part of network {} restart", network); success = false; } // apply internal load balancer rules - if (!_lbMgr.applyLoadBalancersForNetwork(networkId, Scheme.Internal)) { + if (!_lbMgr.applyLoadBalancersForNetwork(network, Scheme.Internal)) { logger.warn("Failed to reapply internal load balancer rules as a part of network {} restart", network); success = false; } @@ -1975,7 +1975,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { if (services.contains(Service.Firewall.getName())) { //revoke all firewall rules for the network try { - if (_firewallMgr.revokeAllFirewallRulesForNetwork(networkId, userId, caller)) { + if (_firewallMgr.revokeAllFirewallRulesForNetwork(network, userId, caller)) { logger.debug("Successfully cleaned up firewallRules rules for network {}", network); } else { logger.warn("Failed to cleanup Firewall rules as a part of network {} cleanup", network); @@ -3260,7 +3260,7 @@ public boolean shutdownNetworkElementsAndResources(final ReservationContext cont } } if (cleanupNeeded) { - cleanupResult = shutdownNetworkResources(network.getId(), context.getAccount(), context.getCaller().getId()); + cleanupResult = shutdownNetworkResources(network, context.getAccount(), context.getCaller().getId()); } } catch (final Exception ex) { logger.warn("shutdownNetworkRules failed during the network {} shutdown due to", network, ex); @@ -4017,7 +4017,7 @@ private boolean cleanupNetworkResources(final long networkId, final Account call //revoke all firewall rules for the network try { - if 
(_firewallMgr.revokeAllFirewallRulesForNetwork(networkId, callerUserId, caller)) { + if (_firewallMgr.revokeAllFirewallRulesForNetwork(network, callerUserId, caller)) { logger.debug("Successfully cleaned up firewallRules rules for network {}", network); } else { success = false; @@ -4057,7 +4057,7 @@ private boolean cleanupNetworkResources(final long networkId, final Account call logger.debug("Portable IP address {} is no longer associated with any network", ipToRelease); } } else { - _vpcMgr.unassignIPFromVpcNetwork(ipToRelease.getId(), network.getId()); + _vpcMgr.unassignIPFromVpcNetwork(ipToRelease, network); } } @@ -4075,13 +4075,12 @@ private boolean cleanupNetworkResources(final long networkId, final Account call return success; } - private boolean shutdownNetworkResources(final long networkId, final Account caller, final long callerUserId) { + private boolean shutdownNetworkResources(final Network network, final Account caller, final long callerUserId) { // This method cleans up network rules on the backend w/o touching them in the DB boolean success = true; - final Network network = _networksDao.findById(networkId); // Mark all PF rules as revoked and apply them on the backend (not in the DB) - final List pfRules = _portForwardingRulesDao.listByNetwork(networkId); + final List pfRules = _portForwardingRulesDao.listByNetwork(network.getId()); logger.debug("Releasing {} port forwarding rules for network id={} as a part of shutdownNetworkRules.", pfRules.size(), network); for (final PortForwardingRuleVO pfRule : pfRules) { @@ -4100,7 +4099,7 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal } // Mark all static rules as revoked and apply them on the backend (not in the DB) - final List firewallStaticNatRules = _firewallDao.listByNetworkAndPurpose(networkId, Purpose.StaticNat); + final List firewallStaticNatRules = _firewallDao.listByNetworkAndPurpose(network.getId(), Purpose.StaticNat); final List staticNatRules = new 
ArrayList(); logger.debug("Releasing {} static nat rules for network {} as a part of shutdownNetworkRules", firewallStaticNatRules.size(), network); @@ -4129,7 +4128,7 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal } try { - if (!_lbMgr.revokeLoadBalancersForNetwork(networkId, Scheme.Public)) { + if (!_lbMgr.revokeLoadBalancersForNetwork(network, Scheme.Public)) { logger.warn("Failed to cleanup public lb rules as a part of shutdownNetworkRules"); success = false; } @@ -4139,7 +4138,7 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal } try { - if (!_lbMgr.revokeLoadBalancersForNetwork(networkId, Scheme.Internal)) { + if (!_lbMgr.revokeLoadBalancersForNetwork(network, Scheme.Internal)) { logger.warn("Failed to cleanup internal lb rules as a part of shutdownNetworkRules"); success = false; } @@ -4149,7 +4148,7 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal } // revoke all firewall rules for the network w/o applying them on the DB - final List firewallRules = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Ingress); + final List firewallRules = _firewallDao.listByNetworkPurposeTrafficType(network.getId(), Purpose.Firewall, FirewallRule.TrafficType.Ingress); logger.debug("Releasing firewall ingress rules for network {} as a part of shutdownNetworkRules", firewallRules.size(), network); for (final FirewallRuleVO firewallRule : firewallRules) { @@ -4167,7 +4166,7 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal success = false; } - final List firewallEgressRules = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Egress); + final List firewallEgressRules = _firewallDao.listByNetworkPurposeTrafficType(network.getId(), Purpose.Firewall, FirewallRule.TrafficType.Egress); logger.debug("Releasing {} firewall egress rules for network {} as 
a part of shutdownNetworkRules", firewallEgressRules.size(), network); try { @@ -4176,7 +4175,7 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal if (_networkModel.areServicesSupportedInNetwork(network.getId(), Service.Firewall) && (network.getGuestType() == Network.GuestType.Isolated || network.getGuestType() == Network.GuestType.Shared && zone.getNetworkType() == NetworkType.Advanced)) { // add default egress rule to accept the traffic - _firewallMgr.applyDefaultEgressFirewallRule(network.getId(), _networkModel.getNetworkEgressDefaultPolicy(networkId), false); + _firewallMgr.applyDefaultEgressFirewallRule(network.getId(), _networkModel.getNetworkEgressDefaultPolicy(network.getId()), false); } } catch (final ResourceUnavailableException ex) { @@ -4204,7 +4203,7 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal try { //revoke all Network ACLs for the network w/o applying them in the DB - if (!_networkACLMgr.revokeACLItemsForNetwork(networkId)) { + if (!_networkACLMgr.revokeACLItemsForNetwork(network.getId())) { logger.warn("Failed to cleanup network ACLs as a part of shutdownNetworkRules"); success = false; } @@ -4216,13 +4215,13 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal } //release all static nats for the network - if (!_rulesMgr.applyStaticNatForNetwork(networkId, false, caller, true)) { + if (!_rulesMgr.applyStaticNatForNetwork(network.getId(), false, caller, true)) { logger.warn("Failed to disable static nats as part of shutdownNetworkRules for network {}", network); success = false; } // Get all ip addresses, mark as releasing and release them on the backend - final List userIps = _ipAddressDao.listByAssociatedNetwork(networkId, null); + final List userIps = _ipAddressDao.listByAssociatedNetwork(network.getId(), null); final List publicIpsToRelease = new ArrayList(); if (userIps != null && !userIps.isEmpty()) { for (final IPAddressVO userIp : userIps) 
{ diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index 36e281459492..060619088882 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -885,7 +885,7 @@ private DiskProfile allocateTemplatedVolume(Type type, String name, DiskOffering Account owner, long deviceId, String configurationId) { assert (template.getFormat() != ImageFormat.ISO) : "ISO is not a template."; - Long size = _tmpltMgr.getTemplateSize(template.getId(), vm.getDataCenterId()); + Long size = _tmpltMgr.getTemplateSize(template, vm.getDataCenterId()); if (rootDisksize != null) { if (template.isDeployAsIs()) { // Volume size specified from template deploy-as-is @@ -994,7 +994,7 @@ public List allocateTemplatedVolumes(Type type, String name, DiskOf if (configurationDetail != null) { configurationId = configurationDetail.getValue(); } - templateAsIsDisks = _tmpltMgr.getTemplateDisksOnImageStore(template.getId(), DataStoreRole.Image, configurationId); + templateAsIsDisks = _tmpltMgr.getTemplateDisksOnImageStore(template, DataStoreRole.Image, configurationId); if (CollectionUtils.isNotEmpty(templateAsIsDisks)) { templateAsIsDisks = templateAsIsDisks.stream() .filter(x -> !x.isIso()) diff --git a/engine/schema/src/main/java/com/cloud/dc/VlanVO.java b/engine/schema/src/main/java/com/cloud/dc/VlanVO.java index 7423ded598f3..9f0f1c6929a3 100644 --- a/engine/schema/src/main/java/com/cloud/dc/VlanVO.java +++ b/engine/schema/src/main/java/com/cloud/dc/VlanVO.java @@ -193,7 +193,12 @@ public void setPhysicalNetworkId(Long physicalNetworkId) { public String toString() { if (toString == null) { toString = - new StringBuilder("Vlan[").append(vlanTag) + new StringBuilder("Vlan[") 
+ .append(id) + .append("|") + .append(uuid) + .append("|") + .append(vlanTag) .append("|") .append(vlanGateway) .append("|") diff --git a/engine/schema/src/main/java/com/cloud/domain/DomainVO.java b/engine/schema/src/main/java/com/cloud/domain/DomainVO.java index 4c36a3401ca6..7f838a6c61e9 100644 --- a/engine/schema/src/main/java/com/cloud/domain/DomainVO.java +++ b/engine/schema/src/main/java/com/cloud/domain/DomainVO.java @@ -26,6 +26,7 @@ import javax.persistence.Id; import javax.persistence.Table; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -206,7 +207,9 @@ public void setState(Domain.State state) { @Override public String toString() { - return new StringBuilder("Domain:").append(id).append(path).toString(); + return String.format("Domain %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "name", "uuid", "path")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/LBHealthCheckPolicyVO.java b/engine/schema/src/main/java/com/cloud/network/LBHealthCheckPolicyVO.java index 22bb2c26b652..ee5f67b09cd1 100644 --- a/engine/schema/src/main/java/com/cloud/network/LBHealthCheckPolicyVO.java +++ b/engine/schema/src/main/java/com/cloud/network/LBHealthCheckPolicyVO.java @@ -27,6 +27,7 @@ import javax.persistence.Table; import com.cloud.network.rules.HealthCheckPolicy; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "load_balancer_healthcheck_policies") @@ -169,4 +170,11 @@ public void setDisplay(boolean display) { public boolean isDisplay() { return display; } + + @Override + public String toString() { + return String.format("LBHealthCheckPolicy %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "pingPath")); + } } diff --git 
a/engine/schema/src/main/java/com/cloud/network/as/AutoScalePolicyVO.java b/engine/schema/src/main/java/com/cloud/network/as/AutoScalePolicyVO.java index fa5dcafba341..36411d720b01 100644 --- a/engine/schema/src/main/java/com/cloud/network/as/AutoScalePolicyVO.java +++ b/engine/schema/src/main/java/com/cloud/network/as/AutoScalePolicyVO.java @@ -33,6 +33,7 @@ import org.apache.cloudstack.api.InternalIdentity; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "autoscale_policies") @@ -92,7 +93,9 @@ public AutoScalePolicyVO(String name, long domainId, long accountId, int duratio @Override public String toString() { - return new StringBuilder("AutoScalePolicy[").append("id-").append(id).append("]").toString(); + return String.format("AutoScalePolicy %s.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "name", "uuid")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmGroupVO.java b/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmGroupVO.java index 652cbb340a32..8c408e24f652 100644 --- a/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmGroupVO.java +++ b/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmGroupVO.java @@ -127,6 +127,7 @@ public AutoScaleVmGroupVO(long lbRuleId, long zoneId, long domainId, @Override public String toString() { return new StringBuilder("AutoScaleVmGroupVO[").append("id=").append(id) + .append("|uuid=").append(uuid) .append("|name=").append(name) .append("|loadBalancerId=").append(loadBalancerId) .append("|profileId=").append(profileId) diff --git a/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmProfileVO.java b/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmProfileVO.java index 21291062756c..3d869a897dd0 100644 --- a/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmProfileVO.java +++ 
b/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmProfileVO.java @@ -37,6 +37,7 @@ import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; @@ -126,7 +127,9 @@ public AutoScaleVmProfileVO(long zoneId, long domainId, long accountId, long ser @Override public String toString() { - return new StringBuilder("AutoScaleVMProfileVO[").append("id").append(id).append("-").append("templateId").append("-").append(templateId).append("]").toString(); + return String.format("AutoScaleVMProfileVO %s.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "templateId", "uuid")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/as/ConditionVO.java b/engine/schema/src/main/java/com/cloud/network/as/ConditionVO.java index 18e67a4af61c..0679dac32355 100644 --- a/engine/schema/src/main/java/com/cloud/network/as/ConditionVO.java +++ b/engine/schema/src/main/java/com/cloud/network/as/ConditionVO.java @@ -33,6 +33,7 @@ import org.apache.cloudstack.api.InternalIdentity; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "conditions") @@ -91,7 +92,9 @@ public long getId() { @Override public String toString() { - return new StringBuilder("Condition[").append("id-").append(id).append("]").toString(); + return String.format("Condition %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/as/CounterVO.java b/engine/schema/src/main/java/com/cloud/network/as/CounterVO.java index e5ab9886dda7..c13076baa46b 100644 --- a/engine/schema/src/main/java/com/cloud/network/as/CounterVO.java +++ 
b/engine/schema/src/main/java/com/cloud/network/as/CounterVO.java @@ -34,6 +34,7 @@ import com.cloud.network.Network; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "counter") @@ -79,7 +80,9 @@ public CounterVO(Source source, String name, String value, Network.Provider prov @Override public String toString() { - return new StringBuilder("Counter[").append("id-").append(id).append("]").toString(); + return String.format("Counter %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "name", "uuid")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/dao/ExternalLoadBalancerDeviceVO.java b/engine/schema/src/main/java/com/cloud/network/dao/ExternalLoadBalancerDeviceVO.java index 80bec1b8152f..88c5c0885a87 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/ExternalLoadBalancerDeviceVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/ExternalLoadBalancerDeviceVO.java @@ -30,6 +30,7 @@ import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; import org.apache.cloudstack.network.ExternalNetworkDeviceManager; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; /** * ExternalLoadBalancerDeviceVO contains information on external load balancer devices (F5/Netscaler VPX,MPX,SDX) added into a deployment @@ -244,4 +245,11 @@ public String getUuid() { public void setUuid(String uuid) { this.uuid = uuid; } + + @Override + public String toString() { + return String.format("ExternalLoadBalancerDevice %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "providerName")); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/IPAddressVO.java b/engine/schema/src/main/java/com/cloud/network/dao/IPAddressVO.java index 4c7569a55b96..88e146d2a804 100644 --- 
a/engine/schema/src/main/java/com/cloud/network/dao/IPAddressVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/IPAddressVO.java @@ -33,6 +33,7 @@ import com.cloud.network.IpAddress; import com.cloud.utils.db.GenericDao; import com.cloud.utils.net.Ip; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; /** * A bean representing a public IP Address @@ -268,7 +269,9 @@ public void setState(State state) { @Override public String toString() { - return new StringBuilder("Ip[").append(address).append("-").append(dataCenterId).append("]").toString(); + return String.format("IPAddress %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "dataCenterId", "address")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/dao/LBStickinessPolicyVO.java b/engine/schema/src/main/java/com/cloud/network/dao/LBStickinessPolicyVO.java index e9f50a75a7b7..72b8fc151b78 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/LBStickinessPolicyVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/LBStickinessPolicyVO.java @@ -33,6 +33,7 @@ import com.cloud.network.rules.StickinessPolicy; import com.cloud.utils.Pair; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "load_balancer_stickiness_policies") @@ -162,4 +163,11 @@ public void setDisplay(boolean display) { public boolean isDisplay() { return display; } + + @Override + public String toString() { + return String.format("LBStickinessPolicy %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "methodName")); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVO.java b/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVO.java index bd5ea95dcc7f..ad0338b98497 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVO.java +++ 
b/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVO.java @@ -27,6 +27,7 @@ import com.cloud.network.rules.FirewallRuleVO; import com.cloud.network.rules.LoadBalancer; import com.cloud.utils.net.NetUtils; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; /** * This VO represents Public Load Balancer @@ -136,4 +137,11 @@ public Scheme getScheme() { public String getCidrList() { return cidrList; } + + @Override + public String toString() { + return String.format("LoadBalancer %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "purpose", "state")); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkServiceProviderVO.java b/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkServiceProviderVO.java index 415b513b405a..80d40b13f32a 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkServiceProviderVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkServiceProviderVO.java @@ -35,6 +35,7 @@ import com.cloud.network.Network.Service; import com.cloud.network.PhysicalNetworkServiceProvider; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "physical_network_service_providers") @@ -109,6 +110,13 @@ public PhysicalNetworkServiceProviderVO(long physicalNetworkId, String name) { this.uuid = UUID.randomUUID().toString(); } + + @Override + public String toString() { + return String.format("PhysicalNetworkServiceProvider %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "name", "uuid", "providerName")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkVO.java b/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkVO.java index 52ebe7596a41..93850a322dd5 
100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkVO.java @@ -37,6 +37,7 @@ import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; /** * NetworkConfigurationVO contains information about a specific physical network. @@ -248,4 +249,11 @@ public void setUuid(String uuid) { public String getName() { return name; } + + @Override + public String toString() { + return String.format("PhysicalNetwork %s.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "name", "uuid")); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/RemoteAccessVpnVO.java b/engine/schema/src/main/java/com/cloud/network/dao/RemoteAccessVpnVO.java index 95e3693a99c5..2439ea55b4a8 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/RemoteAccessVpnVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/RemoteAccessVpnVO.java @@ -18,6 +18,7 @@ import com.cloud.network.RemoteAccessVpn; import com.cloud.utils.db.Encrypt; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import javax.persistence.Entity; @@ -86,6 +87,11 @@ public RemoteAccessVpnVO(long accountId, long domainId, Long networkId, long pub this.vpcId = vpcId; } + @Override + public String toString() { + return String.format("RemoteAccessVpn %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid")); + } + @Override public State getState() { return state; diff --git a/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteCustomerGatewayVO.java b/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteCustomerGatewayVO.java index 52741fdd9a54..e5394238c315 100644 --- 
a/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteCustomerGatewayVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteCustomerGatewayVO.java @@ -29,6 +29,7 @@ import com.cloud.network.Site2SiteCustomerGateway; import com.cloud.utils.db.Encrypt; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @@ -110,6 +111,13 @@ public Site2SiteCustomerGatewayVO(String name, long accountId, long domainId, St this.ikeVersion = ikeVersion; } + @Override + public String toString() { + return String.format("Site2SiteCustomerGateway %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnGatewayVO.java b/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnGatewayVO.java index 703c78c7b861..a5eb7efce234 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnGatewayVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnGatewayVO.java @@ -28,6 +28,7 @@ import com.cloud.network.Site2SiteVpnGateway; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @@ -70,6 +71,13 @@ public Site2SiteVpnGatewayVO(long accountId, long domainId, long addrId, long vp this.domainId = domainId; } + @Override + public String toString() { + return String.format("Site2SiteVpnGateway %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/network/security/SecurityGroupRuleVO.java b/engine/schema/src/main/java/com/cloud/network/security/SecurityGroupRuleVO.java index 1980cd33d146..325a6efc867f 100644 --- 
a/engine/schema/src/main/java/com/cloud/network/security/SecurityGroupRuleVO.java +++ b/engine/schema/src/main/java/com/cloud/network/security/SecurityGroupRuleVO.java @@ -16,6 +16,8 @@ // under the License. package com.cloud.network.security; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; + import java.util.UUID; import javax.persistence.Column; @@ -89,6 +91,13 @@ public SecurityGroupRuleVO(SecurityRuleType type, long securityGroupId, int from } } + @Override + public String toString() { + return String.format("SecurityGroupRule %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "type")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/network/security/SecurityGroupVO.java b/engine/schema/src/main/java/com/cloud/network/security/SecurityGroupVO.java index ec1cfae43b63..940baaad18d7 100644 --- a/engine/schema/src/main/java/com/cloud/network/security/SecurityGroupVO.java +++ b/engine/schema/src/main/java/com/cloud/network/security/SecurityGroupVO.java @@ -16,6 +16,8 @@ // under the License. 
package com.cloud.network.security; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; + import java.util.UUID; import javax.persistence.Column; @@ -60,6 +62,13 @@ public SecurityGroupVO(String name, String description, long domainId, long acco uuid = UUID.randomUUID().toString(); } + @Override + public String toString() { + return String.format("SecurityGroup %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLItemVO.java b/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLItemVO.java index f28b3125a09d..c2a52663f216 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLItemVO.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLItemVO.java @@ -35,6 +35,7 @@ import com.cloud.utils.db.GenericDao; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "network_acl_item") @@ -168,7 +169,9 @@ public Date getCreated() { @Override public String toString() { - return new StringBuilder("Rule[").append(id).append("-").append("NetworkACL").append("-").append(state).append("]").toString(); + return String.format("NetworkACLItem %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "state")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLVO.java b/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLVO.java index 280d5dfaf4b2..37b9e7ff296a 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLVO.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLVO.java @@ -89,7 +89,7 @@ public String getName() { @Override public String toString() { - return 
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "uuid", "name", "vpcId"); + return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "name", "vpcId"); } public void setUuid(String uuid) { diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/VpcGatewayVO.java b/engine/schema/src/main/java/com/cloud/network/vpc/VpcGatewayVO.java index 72f6a89e70fc..b1d4df35d4ca 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/VpcGatewayVO.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/VpcGatewayVO.java @@ -29,6 +29,7 @@ import javax.persistence.Table; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "vpc_gateways") @@ -163,9 +164,9 @@ public long getNetworkId() { @Override public String toString() { - StringBuilder buf = new StringBuilder("VpcGateway["); - buf.append(id).append("|").append(ip4Address.toString()).append("|").append(vpcId).append("]"); - return buf.toString(); + return String.format("VpcGateway %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "vpcId", "ip4Address")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/VpcOfferingVO.java b/engine/schema/src/main/java/com/cloud/network/vpc/VpcOfferingVO.java index 41254ba4a8ba..d4f0783451c4 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/VpcOfferingVO.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/VpcOfferingVO.java @@ -30,6 +30,7 @@ import com.cloud.offering.NetworkOffering; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "vpc_offerings") @@ -180,8 +181,9 @@ public void setUniqueName(String uniqueName) { @Override public String toString() { - StringBuilder buf = new StringBuilder("[VPC Offering ["); - return 
buf.append(id).append("-").append(name).append("]").toString(); + return String.format("VPCOffering %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "name", "uuid")); } public void setName(String name) { diff --git a/engine/schema/src/main/java/com/cloud/projects/ProjectInvitationVO.java b/engine/schema/src/main/java/com/cloud/projects/ProjectInvitationVO.java index 36e772edd3a8..2c127986f9f4 100644 --- a/engine/schema/src/main/java/com/cloud/projects/ProjectInvitationVO.java +++ b/engine/schema/src/main/java/com/cloud/projects/ProjectInvitationVO.java @@ -29,6 +29,7 @@ import javax.persistence.Table; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "project_invitations") @@ -127,9 +128,9 @@ public void setState(State state) { @Override public String toString() { - StringBuilder buf = new StringBuilder("ProjectInvitation["); - buf.append(id).append("|projectId=").append(projectId).append("|accountId=").append(forAccountId).append("]"); - return buf.toString(); + return String.format("ProjectInvitation %s.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "projectId", "uuid", "forAccountId")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/projects/ProjectVO.java b/engine/schema/src/main/java/com/cloud/projects/ProjectVO.java index c8faa00812c5..d4d35e677052 100644 --- a/engine/schema/src/main/java/com/cloud/projects/ProjectVO.java +++ b/engine/schema/src/main/java/com/cloud/projects/ProjectVO.java @@ -117,7 +117,9 @@ public Date getRemoved() { @Override public String toString() { - return String.format("Project %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "name", "uuid", "domainId")); + return String.format("Project %s.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "name", "uuid", "domainId")); } @Override diff --git 
a/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java b/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java index b4f112f98e8b..5da09f569f25 100644 --- a/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java @@ -34,6 +34,7 @@ import com.cloud.offering.DiskOffering; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "disk_offering") @@ -588,4 +589,11 @@ public boolean getDiskSizeStrictness() { public void setDiskSizeStrictness(boolean diskSizeStrictness) { this.diskSizeStrictness = diskSizeStrictness; } + + @Override + public String toString() { + return String.format("DiskOffering %s.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "name", "uuid")); + } } diff --git a/engine/schema/src/main/java/com/cloud/storage/SnapshotPolicyVO.java b/engine/schema/src/main/java/com/cloud/storage/SnapshotPolicyVO.java index c78485868265..f57d9d3dccf2 100644 --- a/engine/schema/src/main/java/com/cloud/storage/SnapshotPolicyVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/SnapshotPolicyVO.java @@ -27,6 +27,7 @@ import com.cloud.storage.snapshot.SnapshotPolicy; import com.cloud.utils.DateUtil.IntervalType; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "snapshot_policy") @@ -76,6 +77,13 @@ public SnapshotPolicyVO(long volumeId, String schedule, String timezone, Interva this.uuid = UUID.randomUUID().toString(); } + @Override + public String toString() { + return String.format("SnapshotPolicy %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "volumeId")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/storage/SnapshotScheduleVO.java 
b/engine/schema/src/main/java/com/cloud/storage/SnapshotScheduleVO.java index 86e0da53666f..dc2694cfbb12 100644 --- a/engine/schema/src/main/java/com/cloud/storage/SnapshotScheduleVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/SnapshotScheduleVO.java @@ -29,6 +29,7 @@ import javax.persistence.TemporalType; import com.cloud.storage.snapshot.SnapshotSchedule; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.lang3.builder.ReflectionToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; @@ -73,6 +74,13 @@ public SnapshotScheduleVO(long volumeId, long policyId, Date scheduledTimestamp) this.asyncJobId = null; } + @Override + public String toString() { + return String.format("SnapshotSchedule %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "volumeId", "policyId")); + } + @Override public long getId() { return id; @@ -134,11 +142,4 @@ public String getUuid() { public void setUuid(String uuid) { this.uuid = uuid; } - - @Override - public String toString() { - ReflectionToStringBuilder reflectionToStringBuilder = new ReflectionToStringBuilder(this, ToStringStyle.JSON_STYLE); - reflectionToStringBuilder.setExcludeFieldNames("id"); - return reflectionToStringBuilder.toString(); - } } diff --git a/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java b/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java index 1da7d52a366a..d204f67dc939 100644 --- a/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java +++ b/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java @@ -36,6 +36,7 @@ import com.cloud.utils.db.Encrypt; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.lang3.StringUtils; @Entity @@ -130,6 +131,12 @@ public enum Setup2FAstatus { public UserAccountVO() { } + @Override + public String 
toString() { + return String.format("UserAccount %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields + (this, "id", "uuid", "username", "accountName")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/user/UserVO.java b/engine/schema/src/main/java/com/cloud/user/UserVO.java index 7dac26429ace..6e355e102e6c 100644 --- a/engine/schema/src/main/java/com/cloud/user/UserVO.java +++ b/engine/schema/src/main/java/com/cloud/user/UserVO.java @@ -296,7 +296,7 @@ public void setRegistered(boolean registered) { @Override public String toString() { - return String.format("User %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "username", "uuid")); + return String.format("User %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "username")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/vm/InstanceGroupVO.java b/engine/schema/src/main/java/com/cloud/vm/InstanceGroupVO.java index 4437af29bc1d..d5bd8c5aaae9 100644 --- a/engine/schema/src/main/java/com/cloud/vm/InstanceGroupVO.java +++ b/engine/schema/src/main/java/com/cloud/vm/InstanceGroupVO.java @@ -32,6 +32,7 @@ import com.cloud.user.Account; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "instance_group") @@ -74,6 +75,12 @@ protected InstanceGroupVO() { super(); } + @Override + public String toString() { + return String.format("InstanceGroup %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "name")); + } + + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/NicSecondaryIpVO.java b/engine/schema/src/main/java/com/cloud/vm/dao/NicSecondaryIpVO.java index 093434052bc1..63eea3e20ddb 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/NicSecondaryIpVO.java +++ 
b/engine/schema/src/main/java/com/cloud/vm/dao/NicSecondaryIpVO.java @@ -28,6 +28,7 @@ import com.cloud.utils.db.GenericDao; import com.cloud.vm.NicSecondaryIp; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "nic_secondary_ips") @@ -87,6 +88,14 @@ protected NicSecondaryIpVO() { @Column(name = "vmId") long vmId; + @Override + public String toString() { + return String.format("NicSecondaryIp %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "name", "uuid", "vmId", + "nicId", "ip4Address", "ip6Address", "networkId")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/org/apache/cloudstack/acl/RoleVO.java b/engine/schema/src/main/java/org/apache/cloudstack/acl/RoleVO.java index 084df29fa427..7f534a226d58 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/acl/RoleVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/acl/RoleVO.java @@ -126,7 +126,7 @@ public boolean isDefault() { @Override public String toString() { - return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "name", "uuid", "roleType"); + return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "name", "uuid", "roleType"); } public boolean isPublicRole() { diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupScheduleVO.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupScheduleVO.java index ba31dc59d390..fd3c0be18d22 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupScheduleVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupScheduleVO.java @@ -29,6 +29,7 @@ import javax.persistence.TemporalType; import com.cloud.utils.DateUtil; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "backup_schedule") @@ -68,6 +69,12 @@ public BackupScheduleVO(Long vmId, 
DateUtil.IntervalType scheduleType, String sc this.scheduledTimestamp = scheduledTimestamp; } + @Override + public String toString() { + return String.format("BackupSchedule %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "vmId", "schedule", "scheduleType")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java index 9b285e66cab9..b4cd2f7badae 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java @@ -19,6 +19,7 @@ import com.cloud.utils.db.GenericDao; import com.google.gson.Gson; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.lang3.StringUtils; import java.util.Arrays; @@ -94,6 +95,12 @@ public BackupVO() { this.uuid = UUID.randomUUID().toString(); } + @Override + public String toString() { + return String.format("Backup %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "vmId", "backupType", "externalId")); + } + @Override public long getId() { return id; diff --git a/engine/schema/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadCertificateVO.java b/engine/schema/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadCertificateVO.java index 36aefa201f37..3c35f59659f6 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadCertificateVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadCertificateVO.java @@ -18,6 +18,7 @@ import com.cloud.hypervisor.Hypervisor; import org.apache.cloudstack.util.HypervisorTypeConverter; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import 
javax.persistence.Convert; @@ -57,6 +58,13 @@ public DirectDownloadCertificateVO() { this.uuid = UUID.randomUUID().toString(); } + @Override + public String toString() { + return String.format("DirectDownloadCertificate %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "alias")); + } + public void setId(Long id) { this.id = id; } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/lb/ApplicationLoadBalancerRuleVO.java b/engine/schema/src/main/java/org/apache/cloudstack/lb/ApplicationLoadBalancerRuleVO.java index d8ee8631b0b9..4fec96067a36 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/lb/ApplicationLoadBalancerRuleVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/lb/ApplicationLoadBalancerRuleVO.java @@ -30,6 +30,7 @@ import com.cloud.network.rules.FirewallRuleVO; import com.cloud.utils.net.Ip; import com.cloud.utils.net.NetUtils; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; /** * This VO represent Internal Load Balancer rule. 
@@ -90,6 +91,13 @@ public ApplicationLoadBalancerRuleVO(String name, String description, int srcPor this.scheme = scheme; } + @Override + public String toString() { + return String.format("ApplicationLoadBalancerRule %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "purpose", "state")); + } + @Override public Long getSourceIpNetworkId() { return sourceIpNetworkId; diff --git a/engine/schema/src/main/java/org/apache/cloudstack/region/gslb/GlobalLoadBalancerRuleVO.java b/engine/schema/src/main/java/org/apache/cloudstack/region/gslb/GlobalLoadBalancerRuleVO.java index 1865b9a67834..4ce7033156fa 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/region/gslb/GlobalLoadBalancerRuleVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/region/gslb/GlobalLoadBalancerRuleVO.java @@ -29,6 +29,7 @@ import javax.persistence.Table; import com.cloud.region.ha.GlobalLoadBalancerRule; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "global_load_balancing_rules") @@ -92,6 +93,15 @@ public GlobalLoadBalancerRuleVO(String name, String description, String gslbDoma this.state = state; } + + @Override + public String toString() { + return String.format("GlobalLoadBalancerRule %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } + + @Override public String getName() { return name; diff --git a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java index b7468195f5da..808c319b40f2 100644 --- a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java +++ 
b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java @@ -303,7 +303,6 @@ private void configureAndTestSendCommandTest(Class exc Mockito.lenient().when(dataStoreVO.getId()).thenReturn(0l); ImageStoreEntity destDataStore = Mockito.mock(ImageStoreImpl.class); - Mockito.doReturn(0l).when(destDataStore).getId(); Answer copyCommandAnswer = Mockito.mock(Answer.class); diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 0bd4b5d8bc07..717b70c34aad 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -194,7 +194,7 @@ public class VolumeServiceImpl implements VolumeService { @Inject HostDao _hostDao; @Inject - private PrimaryDataStoreDao storagePoolDao; + PrimaryDataStoreDao storagePoolDao; @Inject private StoragePoolDetailsDao _storagePoolDetailsDao; @Inject diff --git a/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java b/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java index c4241dfbc3a1..aa5ac3b9a76e 100644 --- a/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java +++ b/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java @@ -47,6 +47,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; import org.apache.cloudstack.framework.async.AsyncCallFuture; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -85,6 +86,9 @@ public 
class VolumeServiceTest extends TestCase{ @Mock StorageManager storageManagerMock; + @Mock + PrimaryDataStoreDao primaryDataStoreDao; + @Mock VolumeVO volumeVoMock; @@ -105,6 +109,7 @@ public void setup(){ volumeServiceImplSpy.snapshotMgr = snapshotManagerMock; volumeServiceImplSpy._storageMgr = storageManagerMock; volumeServiceImplSpy._hostDao = hostDaoMock; + volumeServiceImplSpy.storagePoolDao = primaryDataStoreDao; volumeServiceImplSpy.diskOfferingDao = diskOfferingDaoMock; } @@ -220,6 +225,7 @@ public void validateDestroySourceVolumeAfterMigrationExpungeSourceVolumeAfterMig VolumeVO vo = new VolumeVO() {}; vo.setPoolType(Storage.StoragePoolType.Filesystem); volumeObject.configure(null, vo); + vo.setPoolId(1L); List exceptions = new ArrayList<>(Arrays.asList(new InterruptedException(), new ExecutionException() {})); diff --git a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/element/CiscoVnmcElement.java b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/element/CiscoVnmcElement.java index bea5a2c3f25e..9b96bd4138d6 100644 --- a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/element/CiscoVnmcElement.java +++ b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/element/CiscoVnmcElement.java @@ -28,6 +28,7 @@ import javax.naming.ConfigurationException; import javax.persistence.EntityExistsException; +import com.cloud.user.User; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; @@ -280,24 +281,24 @@ public boolean implement(final Network network, final NetworkOffering offering, final List devices = _ciscoVnmcDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - logger.error("No Cisco Vnmc device on network " + network.getName()); + logger.error("No Cisco Vnmc device on network {}", network); 
return false; } List asaList = _ciscoAsa1000vDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (asaList.isEmpty()) { - logger.debug("No Cisco ASA 1000v device on network " + network.getName()); + logger.debug("No Cisco ASA 1000v device on network {}", network); return false; } NetworkAsa1000vMapVO asaForNetwork = _networkAsa1000vMapDao.findByNetworkId(network.getId()); if (asaForNetwork != null) { - logger.debug("Cisco ASA 1000v device already associated with network " + network.getName()); + logger.debug("Cisco ASA 1000v device already associated with network {}", network); return true; } if (!_networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.SourceNat, Provider.CiscoVnmc)) { - logger.error("SourceNat service is not provided by Cisco Vnmc device on network " + network.getName()); + logger.error("SourceNat service is not provided by Cisco Vnmc device on network {}", network); return false; } @@ -305,21 +306,21 @@ public boolean implement(final Network network, final NetworkOffering offering, // ensure that there is an ASA 1000v assigned to this network CiscoAsa1000vDevice assignedAsa = assignAsa1000vToNetwork(network); if (assignedAsa == null) { - logger.error("Unable to assign ASA 1000v device to network " + network.getName()); - throw new CloudRuntimeException("Unable to assign ASA 1000v device to network " + network.getName()); + logger.error("Unable to assign ASA 1000v device to network {}", network); + throw new CloudRuntimeException(String.format("Unable to assign ASA 1000v device to network %s", network)); } ClusterVO asaCluster = _clusterDao.findById(assignedAsa.getClusterId()); ClusterVSMMapVO clusterVsmMap = _clusterVsmMapDao.findByClusterId(assignedAsa.getClusterId()); if (clusterVsmMap == null) { - logger.error("Vmware cluster " + asaCluster.getName() + " has no Cisco Nexus VSM device associated with it"); - throw new CloudRuntimeException("Vmware cluster " + asaCluster.getName() + " has no Cisco Nexus VSM device 
associated with it"); + logger.error("Vmware cluster {} has no Cisco Nexus VSM device associated with it", asaCluster); + throw new CloudRuntimeException(String.format("Vmware cluster %s has no Cisco Nexus VSM device associated with it", asaCluster)); } CiscoNexusVSMDeviceVO vsmDevice = _vsmDeviceDao.findById(clusterVsmMap.getVsmId()); if (vsmDevice == null) { - logger.error("Unable to load details of Cisco Nexus VSM device associated with cluster " + asaCluster.getName()); - throw new CloudRuntimeException("Unable to load details of Cisco Nexus VSM device associated with cluster " + asaCluster.getName()); + logger.error("Unable to load details of Cisco Nexus VSM device associated with cluster {}", asaCluster); + throw new CloudRuntimeException(String.format("Unable to load details of Cisco Nexus VSM device associated with cluster %s", asaCluster)); } CiscoVnmcControllerVO ciscoVnmcDevice = devices.get(0); @@ -350,8 +351,8 @@ public boolean implement(final Network network, final NetworkOffering offering, if (outsideIp == null) { // none available, acquire one try { Account caller = CallContext.current().getCallingAccount(); - long callerUserId = CallContext.current().getCallingUserId(); - outsideIp = _ipAddrMgr.allocateIp(owner, false, caller, callerUserId, zone, true, null); + User callerUser = CallContext.current().getCallingUser(); + outsideIp = _ipAddrMgr.allocateIp(owner, false, caller, callerUser, zone, true, null); } catch (ResourceAllocationException e) { logger.error("Unable to allocate additional public Ip address. Exception details " + e); throw new CloudRuntimeException("Unable to allocate additional public Ip address. 
Exception details " + e); @@ -373,29 +374,27 @@ public boolean implement(final Network network, final NetworkOffering offering, // all public ip addresses must be from same subnet, this essentially means single public subnet in zone if (!createLogicalEdgeFirewall(vlanId, network.getGateway(), gatewayNetmask, outsideIp.getAddress().addr(), sourceNatIp.getNetmask(), publicGateways, ciscoVnmcHost.getId())) { - logger.error("Failed to create logical edge firewall in Cisco VNMC device for network " + network.getName()); - throw new CloudRuntimeException("Failed to create logical edge firewall in Cisco VNMC device for network " + network.getName()); + logger.error("Failed to create logical edge firewall in Cisco VNMC device for network {}", network); + throw new CloudRuntimeException(String.format("Failed to create logical edge firewall in Cisco VNMC device for network %s", network)); } // create stuff in VSM for ASA device if (!configureNexusVsmForAsa(vlanId, network.getGateway(), vsmDevice.getUserName(), vsmDevice.getPassword(), vsmDevice.getipaddr(), assignedAsa.getInPortProfile(), ciscoVnmcHost.getId())) { - logger.error("Failed to configure Cisco Nexus VSM " + vsmDevice.getipaddr() + " for ASA device for network " + network.getName()); - throw new CloudRuntimeException("Failed to configure Cisco Nexus VSM " + vsmDevice.getipaddr() + " for ASA device for network " + network.getName()); + logger.error("Failed to configure Cisco Nexus VSM {} for ASA device for network {}", vsmDevice.getipaddr(), network); + throw new CloudRuntimeException(String.format("Failed to configure Cisco Nexus VSM %s for ASA device for network %s", vsmDevice.getipaddr(), network)); } // configure source NAT if (!configureSourceNat(vlanId, network.getCidr(), sourceNatIp, ciscoVnmcHost.getId())) { - logger.error("Failed to configure source NAT in Cisco VNMC device for network " + network.getName()); - throw new CloudRuntimeException("Failed to configure source NAT in Cisco VNMC device for network 
" + network.getName()); + logger.error("Failed to configure source NAT in Cisco VNMC device for network {}", network); + throw new CloudRuntimeException(String.format("Failed to configure source NAT in Cisco VNMC device for network %s", network)); } // associate Asa 1000v instance with logical edge firewall if (!associateAsaWithLogicalEdgeFirewall(vlanId, assignedAsa.getManagementIp(), ciscoVnmcHost.getId())) { - logger.error("Failed to associate Cisco ASA 1000v (" + assignedAsa.getManagementIp() + ") with logical edge firewall in VNMC for network " + - network.getName()); - throw new CloudRuntimeException("Failed to associate Cisco ASA 1000v (" + assignedAsa.getManagementIp() + - ") with logical edge firewall in VNMC for network " + network.getName()); + logger.error("Failed to associate Cisco ASA 1000v ({}) with logical edge firewall in VNMC for network {}", assignedAsa.getManagementIp(), network); + throw new CloudRuntimeException(String.format("Failed to associate Cisco ASA 1000v (%s) with logical edge firewall in VNMC for network %s", assignedAsa.getManagementIp(), network)); } } catch (CloudRuntimeException e) { unassignAsa1000vFromNetwork(network); @@ -640,27 +639,26 @@ public IpDeployer getIpDeployer(Network network) { public boolean applyFWRules(Network network, List rules) throws ResourceUnavailableException { if (!_networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.Firewall, Provider.CiscoVnmc)) { - logger.error("Firewall service is not provided by Cisco Vnmc device on network " + network.getName()); + logger.error("Firewall service is not provided by Cisco Vnmc device on network {}", network); return false; } // Find VNMC host for physical network List devices = _ciscoVnmcDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - logger.error("No Cisco Vnmc device on network " + network.getName()); + logger.error("No Cisco Vnmc device on network {}", network); return true; } // Find if ASA 1000v is 
associated with network NetworkAsa1000vMapVO asaForNetwork = _networkAsa1000vMapDao.findByNetworkId(network.getId()); if (asaForNetwork == null) { - logger.debug("Cisco ASA 1000v device is not associated with network " + network.getName()); + logger.debug("Cisco ASA 1000v device is not associated with network {}", network); return true; } if (network.getState() == Network.State.Allocated) { - logger.debug("External firewall was asked to apply firewall rules for network with ID " + network.getId() + - "; this network is not implemented. Skipping backend commands."); + logger.debug("External firewall was asked to apply firewall rules for network {}; this network is not implemented. Skipping backend commands.", network); return true; } @@ -698,27 +696,26 @@ public boolean applyFWRules(Network network, List rules) public boolean applyPFRules(Network network, List rules) throws ResourceUnavailableException { if (!_networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.PortForwarding, Provider.CiscoVnmc)) { - logger.error("Port forwarding service is not provided by Cisco Vnmc device on network " + network.getName()); + logger.error("Port forwarding service is not provided by Cisco Vnmc device on network {}", network); return false; } // Find VNMC host for physical network List devices = _ciscoVnmcDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - logger.error("No Cisco Vnmc device on network " + network.getName()); + logger.error("No Cisco Vnmc device on network {}", network); return true; } // Find if ASA 1000v is associated with network NetworkAsa1000vMapVO asaForNetwork = _networkAsa1000vMapDao.findByNetworkId(network.getId()); if (asaForNetwork == null) { - logger.debug("Cisco ASA 1000v device is not associated with network " + network.getName()); + logger.debug("Cisco ASA 1000v device is not associated with network {}", network); return true; } if (network.getState() == Network.State.Allocated) { - 
logger.debug("External firewall was asked to apply port forwarding rules for network with ID " + network.getId() + - "; this network is not implemented. Skipping backend commands."); + logger.debug("External firewall was asked to apply port forwarding rules for network {}; this network is not implemented. Skipping backend commands.", network); return true; } @@ -752,27 +749,26 @@ public boolean applyPFRules(Network network, List rules) thr @Override public boolean applyStaticNats(Network network, List rules) throws ResourceUnavailableException { if (!_networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.StaticNat, Provider.CiscoVnmc)) { - logger.error("Static NAT service is not provided by Cisco Vnmc device on network " + network.getName()); + logger.error("Static NAT service is not provided by Cisco Vnmc device on network {}", network); return false; } // Find VNMC host for physical network List devices = _ciscoVnmcDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - logger.error("No Cisco Vnmc device on network " + network.getName()); + logger.error("No Cisco Vnmc device on network {}", network); return true; } // Find if ASA 1000v is associated with network NetworkAsa1000vMapVO asaForNetwork = _networkAsa1000vMapDao.findByNetworkId(network.getId()); if (asaForNetwork == null) { - logger.debug("Cisco ASA 1000v device is not associated with network " + network.getName()); + logger.debug("Cisco ASA 1000v device is not associated with network {}", network); return true; } if (network.getState() == Network.State.Allocated) { - logger.debug("External firewall was asked to apply static NAT rules for network with ID " + network.getId() + - "; this network is not implemented. Skipping backend commands."); + logger.debug("External firewall was asked to apply static NAT rules for network {}; this network is not implemented. 
Skipping backend commands.", network); return true; } diff --git a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/LoadBalanceRuleHandler.java b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/LoadBalanceRuleHandler.java index 6812fa495324..714855934656 100644 --- a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/LoadBalanceRuleHandler.java +++ b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/LoadBalanceRuleHandler.java @@ -314,7 +314,7 @@ private void releaseIp(final long ipId, final long userId, final Account caller) final IPAddressVO ipvo = _ipAddressDao.findById(ipId); ipvo.setAssociatedWithNetworkId(null); _ipAddressDao.update(ipvo.getId(), ipvo); - _ipAddrMgr.disassociatePublicIpAddress(ipId, userId, caller); + _ipAddrMgr.disassociatePublicIpAddress(ipvo, userId, caller); _ipAddressDao.unassignIpAddress(ipId); } diff --git a/server/src/main/java/com/cloud/acl/AffinityGroupAccessChecker.java b/server/src/main/java/com/cloud/acl/AffinityGroupAccessChecker.java index 3a648cdcbf0a..a865ff19f7b2 100644 --- a/server/src/main/java/com/cloud/acl/AffinityGroupAccessChecker.java +++ b/server/src/main/java/com/cloud/acl/AffinityGroupAccessChecker.java @@ -67,10 +67,10 @@ public boolean checkAccess(Account caller, ControlledEntity entity, AccessType a if (!_affinityGroupService.isAffinityGroupAvailableInDomain(group.getId(), caller.getDomainId())) { DomainVO callerDomain = _domainDao.findById(caller.getDomainId()); if (callerDomain == null) { - throw new CloudRuntimeException("cannot check permission on account " + caller.getAccountName() + " whose domain does not exist"); + throw new CloudRuntimeException(String.format("cannot check permission on account %s whose domain does not exist", caller)); } - throw new PermissionDeniedException("Affinity group is not available in domain id=" + callerDomain.getUuid()); + throw new 
PermissionDeniedException(String.format("Affinity group is not available in domain id=%s", callerDomain)); } else { return true; } diff --git a/server/src/main/java/com/cloud/acl/DomainChecker.java b/server/src/main/java/com/cloud/acl/DomainChecker.java index e9f60ea7aa1a..97832311b178 100644 --- a/server/src/main/java/com/cloud/acl/DomainChecker.java +++ b/server/src/main/java/com/cloud/acl/DomainChecker.java @@ -137,21 +137,21 @@ private void checkPublicTemplateAccess(VirtualMachineTemplate template, Account @Override public boolean checkAccess(Account caller, Domain domain) throws PermissionDeniedException { if (caller.getState() != Account.State.ENABLED) { - throw new PermissionDeniedException("Account " + caller.getAccountName() + " is disabled."); + throw new PermissionDeniedException(String.format("Account %s is disabled.", caller)); } if (domain == null) { - throw new PermissionDeniedException(String.format("Provided domain is NULL, cannot check access for account [uuid=%s, name=%s]", caller.getUuid(), caller.getAccountName())); + throw new PermissionDeniedException(String.format("Provided domain is NULL, cannot check access for account [%s]", caller)); } long domainId = domain.getId(); if (_accountService.isNormalUser(caller.getId())) { if (caller.getDomainId() != domainId) { - throw new PermissionDeniedException("Account " + caller.getAccountName() + " does not have permission to operate within domain id=" + domain.getUuid()); + throw new PermissionDeniedException(String.format("Account %s does not have permission to operate within domain id=%s", caller, domain.getUuid())); } } else if (!_domainDao.isChildDomain(caller.getDomainId(), domainId)) { - throw new PermissionDeniedException("Account " + caller.getAccountName() + " does not have permission to operate within domain id=" + domain.getUuid()); + throw new PermissionDeniedException(String.format("Account %s does not have permission to operate within domain id=%s", caller, domain.getUuid())); } return 
true; @@ -187,8 +187,7 @@ public boolean checkAccess(Account caller, ControlledEntity entity, AccessType a // account can launch a VM from this template LaunchPermissionVO permission = _launchPermissionDao.findByTemplateAndAccount(template.getId(), caller.getId()); if (permission == null) { - throw new PermissionDeniedException("Account " + caller.getAccountName() + - " does not have permission to launch instances from template " + template.getName()); + throw new PermissionDeniedException(String.format("Account %s does not have permission to launch instances from template %s", caller, template)); } } else { // Domain admin and regular user can delete/modify only templates created by them @@ -221,8 +220,6 @@ public boolean checkAccess(Account caller, ControlledEntity entity, AccessType a protected void validateCallerHasAccessToEntityOwner(Account caller, ControlledEntity entity, AccessType accessType) { PermissionDeniedException exception = new PermissionDeniedException("Caller does not have permission to operate with provided resource."); - String entityLog = String.format("entity [owner ID: %d, type: %s]", entity.getAccountId(), - entity.getEntityType().getSimpleName()); if (_accountService.isRootAdmin(caller.getId())) { return; @@ -233,6 +230,7 @@ protected void validateCallerHasAccessToEntityOwner(Account caller, ControlledEn } Account owner = _accountDao.findById(entity.getAccountId()); + String entityLog = String.format("entity [owner: %s, type: %s]", owner, entity.getEntityType().getSimpleName()); if (owner == null) { logger.error(String.format("Owner not found for %s", entityLog)); throw exception; @@ -248,20 +246,20 @@ protected void validateCallerHasAccessToEntityOwner(Account caller, ControlledEn // only project owner can delete/modify the project if (accessType == AccessType.ModifyProject) { if (!_projectMgr.canModifyProjectAccount(caller, owner.getId())) { - logger.error(String.format("Caller ID: %d does not have permission to modify project with " + - 
"owner ID: %d", caller.getId(), owner.getId())); + logger.error("Caller: {} does not have permission to modify project with " + + "owner: {}", caller, owner); throw exception; } } else if (!_projectMgr.canAccessProjectAccount(caller, owner.getId())) { - logger.error(String.format("Caller ID: %d does not have permission to access project with " + - "owner ID: %d", caller.getId(), owner.getId())); + logger.error("Caller: {} does not have permission to access project with " + + "owner: {}", caller, owner); throw exception; } checkOperationPermitted(caller, entity); return; } - logger.error(String.format("Caller ID: %d does not have permission to access %s", caller.getId(), entityLog)); + logger.error("Caller: {} does not have permission to access {}", caller, entityLog); throw exception; } diff --git a/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java b/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java index 99ac2492e833..4a5f80571ae7 100644 --- a/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java +++ b/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java @@ -321,15 +321,14 @@ protected List allocateTo(DeploymentPlan plan, ServiceOffering offering, V } if (avoid.shouldAvoid(host)) { if (logger.isDebugEnabled()) { - logger.debug("Host name: " + host.getName() + ", hostId: " + host.getId() + " is in avoid set, skipping this and trying other available hosts"); + logger.debug("Host: {} is in avoid set, skipping this and trying other available hosts", host); } continue; } //find number of guest VMs occupying capacity on this host. 
if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) { - logger.debug(() -> String.format("Adding host [%s] to the avoid set because this host already has the max number of running (user and/or system) VMs.", - ReflectionToStringBuilderUtils.reflectOnlySelectedFields(host, "uuid", "name"))); + logger.debug("Adding host [{}] to the avoid set because this host already has the max number of running (user and/or system) VMs.", host); avoid.addHost(host.getId()); continue; } @@ -337,9 +336,8 @@ protected List allocateTo(DeploymentPlan plan, ServiceOffering offering, V // Check if GPU device is required by offering and host has the availability if ((offeringDetails = _serviceOfferingDetailsDao.findDetail(serviceOfferingId, GPU.Keys.vgpuType.toString())) != null) { ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(serviceOfferingId, GPU.Keys.pciDevice.toString()); - if(!_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())){ - logger.debug(String.format("Adding host [%s] to avoid set, because this host does not have required GPU devices available.", - ReflectionToStringBuilderUtils.reflectOnlySelectedFields(host, "uuid", "name"))); + if(!_resourceMgr.isGPUDeviceAvailable(host, groupName.getValue(), offeringDetails.getValue())){ + logger.debug("Adding host [{}] to avoid set, because this host does not have required GPU devices available.", host); avoid.addHost(host.getId()); continue; } @@ -347,12 +345,13 @@ protected List allocateTo(DeploymentPlan plan, ServiceOffering offering, V Pair cpuCapabilityAndCapacity = _capacityMgr.checkIfHostHasCpuCapabilityAndCapacity(host, offering, considerReservedCapacity); if (cpuCapabilityAndCapacity.first() && cpuCapabilityAndCapacity.second()) { if (logger.isDebugEnabled()) { - logger.debug("Found a suitable host, adding to list: " + host.getId()); + logger.debug("Found a suitable host, adding to list: {}", host); } suitableHosts.add(host); } else { if 
(logger.isDebugEnabled()) { - logger.debug("Not using host " + host.getId() + "; host has cpu capability? " + cpuCapabilityAndCapacity.first() + ", host has capacity?" + cpuCapabilityAndCapacity.second()); + logger.debug("Not using host {}; host has cpu capability? {}, host has capacity?{}", + host, cpuCapabilityAndCapacity.first(), cpuCapabilityAndCapacity.second()); } avoid.addHost(host.getId()); } diff --git a/server/src/main/java/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java b/server/src/main/java/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java index 51b45a2dc983..3a2f3a86d5fd 100644 --- a/server/src/main/java/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java +++ b/server/src/main/java/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java @@ -94,7 +94,7 @@ public List allocateTo(VirtualMachineProfile vm, DeploymentPlan plan, Type List vols = _volsDao.findByInstance(vm.getId()); VolumeVO vol = vols.get(0); long podId = vol.getPodId(); - logger.debug("Pod id determined from volume " + vol.getId() + " is " + podId); + logger.debug("Pod id determined from volume {} is {}", vol, podId); Iterator it = pcs.iterator(); while (it.hasNext()) { PodCluster pc = it.next(); diff --git a/server/src/main/java/com/cloud/agent/manager/allocator/impl/UserConcentratedAllocator.java b/server/src/main/java/com/cloud/agent/manager/allocator/impl/UserConcentratedAllocator.java index f710e5bc8460..b5fb77c8179d 100644 --- a/server/src/main/java/com/cloud/agent/manager/allocator/impl/UserConcentratedAllocator.java +++ b/server/src/main/java/com/cloud/agent/manager/allocator/impl/UserConcentratedAllocator.java @@ -86,8 +86,8 @@ public Pair allocateTo(VirtualMachineTemplate template, ServiceOfferi long zoneId = zone.getId(); List podsInZone = _podDao.listByDataCenterId(zoneId); - if (podsInZone.size() == 0) { - logger.debug("No pods found in zone " + zone.getName()); + if (podsInZone.isEmpty()) { + logger.debug("No pods 
found in zone {}", zone); return null; } @@ -111,7 +111,7 @@ public Pair allocateTo(VirtualMachineTemplate template, ServiceOfferi if (!enoughCapacity) { if (logger.isDebugEnabled()) { - logger.debug("Not enough RAM available in zone/pod to allocate storage for user VM (zone: " + zoneId + ", pod: " + podId + ")"); + logger.debug("Not enough RAM available in zone/pod to allocate storage for user VM (zone: {}, pod: {})", zone, pod); } continue; } @@ -121,7 +121,7 @@ public Pair allocateTo(VirtualMachineTemplate template, ServiceOfferi dataCenterAndPodHasEnoughCapacity(zoneId, podId, ((long)offering.getCpu() * offering.getSpeed()), Capacity.CAPACITY_TYPE_CPU, hostCandiates); if (!enoughCapacity) { if (logger.isDebugEnabled()) { - logger.debug("Not enough cpu available in zone/pod to allocate storage for user VM (zone: " + zoneId + ", pod: " + podId + ")"); + logger.debug("Not enough cpu available in zone/pod to allocate storage for user VM (zone: {}, pod: {})", zone, pod); } continue; } @@ -144,14 +144,14 @@ public Pair allocateTo(VirtualMachineTemplate template, ServiceOfferi } } - if (availablePods.size() == 0) { - logger.debug("There are no pods with enough memory/CPU capacity in zone " + zone.getName()); + if (availablePods.isEmpty()) { + logger.debug("There are no pods with enough memory/CPU capacity in zone {}", zone); return null; } else { // Return a random pod int next = _rand.nextInt(availablePods.size()); HostPodVO selectedPod = availablePods.get(next); - logger.debug("Found pod " + selectedPod.getName() + " in zone " + zone.getName()); + logger.debug("Found pod {} in zone {}", selectedPod, zone); return new Pair(selectedPod, podHostCandidates.get(selectedPod.getId())); } } @@ -195,7 +195,7 @@ private boolean dataCenterAndPodHasEnoughCapacity(long dataCenterId, long podId, private boolean skipCalculation(VMInstanceVO vm) { if (vm.getState() == State.Expunging) { if (logger.isDebugEnabled()) { - logger.debug("Skip counting capacity for Expunging VM : " + 
vm.getInstanceName()); + logger.debug("Skip counting capacity for Expunging VM: {}", vm); } return true; } @@ -261,15 +261,16 @@ private long calcHostAllocatedCpuMemoryCapacity(long hostId, short capacityType) usedCapacity += so.getRamSize() * 1024L * 1024L; if (logger.isDebugEnabled()) { - logger.debug("Counting memory capacity used by vm: " + vm.getId() + ", size: " + so.getRamSize() + "MB, host: " + hostId + ", currently counted: " + - toHumanReadableSize(usedCapacity) + " Bytes"); + logger.debug("Counting memory capacity used by vm: {}, size: {}MB, " + + "host: {}, currently counted: {} Bytes", + vm, so.getRamSize(), hostId, toHumanReadableSize(usedCapacity)); } } else if (capacityType == Capacity.CAPACITY_TYPE_CPU) { usedCapacity += so.getCpu() * so.getSpeed(); if (logger.isDebugEnabled()) { - logger.debug("Counting cpu capacity used by vm: " + vm.getId() + ", cpu: " + so.getCpu() + ", speed: " + so.getSpeed() + ", currently counted: " + - usedCapacity + " Bytes"); + logger.debug("Counting cpu capacity used by vm: {}, cpu: {}, speed: {}, " + + "currently counted: {} Bytes", vm, so.getCpu(), so.getSpeed(), usedCapacity); } } } diff --git a/server/src/main/java/com/cloud/alert/ClusterAlertAdapter.java b/server/src/main/java/com/cloud/alert/ClusterAlertAdapter.java index cc993445c231..ae884ddc4d1f 100644 --- a/server/src/main/java/com/cloud/alert/ClusterAlertAdapter.java +++ b/server/src/main/java/com/cloud/alert/ClusterAlertAdapter.java @@ -58,14 +58,14 @@ public void onClusterAlert(Object sender, EventArgs args) { private void onClusterNodeJoined(Object sender, ClusterNodeJoinEventArgs args) { if (logger.isDebugEnabled()) { for (ManagementServerHostVO mshost : args.getJoinedNodes()) { - logger.debug("Handle cluster node join alert, joined node: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid()); + logger.debug("Handle cluster node join alert, joined node: {} ({})", mshost.getServiceIP(), mshost); } } for (ManagementServerHostVO mshost : 
args.getJoinedNodes()) { if (mshost.getId() == args.getSelf().longValue()) { if (logger.isDebugEnabled()) { - logger.debug("Management server node " + mshost.getServiceIP() + " is up, send alert"); + logger.debug("Management server node {} ({}) is up, send alert", mshost.getServiceIP(), mshost); } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_MANAGEMENT_NODE, 0, new Long(0), "Management server node " + mshost.getServiceIP() + " is up", ""); @@ -78,7 +78,7 @@ private void onClusterNodeLeft(Object sender, ClusterNodeLeftEventArgs args) { if (logger.isDebugEnabled()) { for (ManagementServerHostVO mshost : args.getLeftNodes()) { - logger.debug("Handle cluster node left alert, leaving node: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid()); + logger.debug("Handle cluster node left alert, leaving node: {} ({})", mshost.getServiceIP(), mshost); } } @@ -86,13 +86,13 @@ private void onClusterNodeLeft(Object sender, ClusterNodeLeftEventArgs args) { if (mshost.getId() != args.getSelf().longValue()) { if (_mshostDao.increaseAlertCount(mshost.getId()) > 0) { if (logger.isDebugEnabled()) { - logger.debug("Detected management server node " + mshost.getServiceIP() + " is down, send alert"); + logger.debug("Detected management server node {} ({}) is down, send alert", mshost.getServiceIP(), mshost); } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_MANAGEMENT_NODE, 0, new Long(0), "Management server node " + mshost.getServiceIP() + " is down", ""); } else { if (logger.isDebugEnabled()) { - logger.debug("Detected management server node " + mshost.getServiceIP() + " is down, but alert has already been set"); + logger.debug("Detected management server node {} ({}) is down, but alert has already been set", mshost.getServiceIP(), mshost); } } } diff --git a/server/src/main/java/com/cloud/api/ApiServer.java b/server/src/main/java/com/cloud/api/ApiServer.java index 98f87dfc3f08..824d60eec817 100644 --- a/server/src/main/java/com/cloud/api/ApiServer.java +++ 
b/server/src/main/java/com/cloud/api/ApiServer.java @@ -1036,8 +1036,8 @@ public boolean verifyRequest(final Map requestParameters, fina final Account account = userAcctPair.second(); if (user.getState() != Account.State.ENABLED || !account.getState().equals(Account.State.ENABLED)) { - logger.info("disabled or locked user accessing the api, userid = " + user.getId() + "; name = " + user.getUsername() + "; state: " + user.getState() + - "; accountState: " + account.getState()); + logger.info("disabled or locked user accessing the api, user = {} (state: {}); " + + "account: {} (state: {})", user, user.getState(), account, account.getState()); return false; } @@ -1052,7 +1052,7 @@ public boolean verifyRequest(final Map requestParameters, fina // verify secret key exists secretKey = user.getSecretKey(); if (secretKey == null) { - logger.info("User does not have a secret key associated with the account -- ignoring request, username: " + user.getUsername()); + logger.info("User does not have a secret key associated with the account -- ignoring request, username: {}", user); return false; } @@ -1097,7 +1097,7 @@ private boolean commandAvailable(final InetAddress remoteAddress, final String c throw new ServerApiException(ApiErrorCode.UNAUTHORIZED , errorMessage); } catch (final OriginDeniedException ex) { // in this case we can remove the session with extreme prejudice - final String errorMessage = "The user '" + user.getUsername() + "' is not allowed to execute commands from ip address '" + remoteAddress.getHostName() + "'."; + final String errorMessage = String.format("The user '%s' is not allowed to execute commands from ip address '%s'.", user, remoteAddress.getHostName()); logger.debug(errorMessage); return false; } @@ -1278,7 +1278,7 @@ public boolean verifyUser(final Long userId) { if ((user == null) || (user.getRemoved() != null) || !user.getState().equals(Account.State.ENABLED) || (account == null) || !account.getState().equals(Account.State.ENABLED)) { - 
logger.warn("Deleted/Disabled/Locked user with id=" + userId + " attempting to access public API"); + logger.warn("Deleted/Disabled/Locked user [{} account={}] with id={} attempting to access public API", user, account, userId); return false; } return true; diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java index 976d3817a0a4..570d1be814dc 100644 --- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java @@ -4334,7 +4334,7 @@ private Pair, Integer> listDataCentersInternal(ListZonesC List domainIds = new ArrayList(); DomainVO domainRecord = _domainDao.findById(account.getDomainId()); if (domainRecord == null) { - logger.error("Could not find the domainId for account:" + account.getAccountName()); + logger.error("Could not find the domainId for account: {}", account); throw new CloudAuthenticationException("Could not find the domainId for account:" + account.getAccountName()); } domainIds.add(domainRecord.getId()); @@ -4374,7 +4374,7 @@ private Pair, Integer> listDataCentersInternal(ListZonesC List domainIds = new ArrayList(); DomainVO domainRecord = _domainDao.findById(account.getDomainId()); if (domainRecord == null) { - logger.error("Could not find the domainId for account:" + account.getAccountName()); + logger.error("Could not find the domainId for account: {}", account); throw new CloudAuthenticationException("Could not find the domainId for account:" + account.getAccountName()); } domainIds.add(domainRecord.getId()); @@ -4604,13 +4604,13 @@ private Pair, Integer> searchForTemplatesInternal(Long temp throw new InvalidParameterValueException("Please specify a valid template ID."); }// If ISO requested then it should be ISO. 
if (isIso && template.getFormat() != ImageFormat.ISO) { - logger.error("Template Id " + templateId + " is not an ISO"); + logger.error("Template {} is not an ISO", template); InvalidParameterValueException ex = new InvalidParameterValueException("Specified Template Id is not an ISO"); ex.addProxyObject(template.getUuid(), "templateId"); throw ex; }// If ISO not requested then it shouldn't be an ISO. if (!isIso && template.getFormat() == ImageFormat.ISO) { - logger.error("Incorrect format of the template id " + templateId); + logger.error("Incorrect format of the template: {}", template); InvalidParameterValueException ex = new InvalidParameterValueException("Incorrect format " + template.getFormat() + " of the specified template id"); ex.addProxyObject(template.getUuid(), "templateId"); throw ex; diff --git a/server/src/main/java/com/cloud/api/query/vo/NetworkOfferingJoinVO.java b/server/src/main/java/com/cloud/api/query/vo/NetworkOfferingJoinVO.java index 4ed54de80424..bded9a440ec1 100644 --- a/server/src/main/java/com/cloud/api/query/vo/NetworkOfferingJoinVO.java +++ b/server/src/main/java/com/cloud/api/query/vo/NetworkOfferingJoinVO.java @@ -30,6 +30,7 @@ import com.cloud.network.Networks; import com.cloud.offering.NetworkOffering; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "network_offering_view") @@ -200,6 +201,12 @@ public class NetworkOfferingJoinVO extends BaseViewVO implements NetworkOffering public NetworkOfferingJoinVO() { } + @Override + public String toString() { + return String.format("NetworkOffering %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "trafficType")); + } + @Override public long getId() { return id; diff --git a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java index 
02abc507fdbc..dee1aa81758d 100644 --- a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java @@ -752,7 +752,7 @@ public String updateConfiguration(final long userId, final String name, final St resourceType = ApiCommandResourceType.StoragePool; if(name.equals(CapacityManager.StorageOverprovisioningFactor.key())) { if(!pool.getPoolType().supportsOverProvisioning() ) { - throw new InvalidParameterValueException("Unable to update storage pool with id " + resourceId + ". Overprovision not supported for " + pool.getPoolType()); + throw new InvalidParameterValueException(String.format("Unable to update storage pool %s. Overprovision not supported for %s", pool, pool.getPoolType())); } } @@ -1608,7 +1608,7 @@ private void checkPodAttributes(final long podId, final String podName, final Da if (checkForDuplicates) { // Check if the pod already exists if (validPod(podName, zone.getId())) { - throw new InvalidParameterValueException("A pod with name: " + podName + " already exists in zone " + zone.getId() + ". Please specify a different pod name. "); + throw new InvalidParameterValueException(String.format("A pod with name: %s already exists in zone %s. Please specify a different pod name. 
", podName, zone)); } } @@ -1647,7 +1647,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { final List privateIps = _privateIpAddressDao.listByPodIdDcId(podId, pod.getDataCenterId()); if (!privateIps.isEmpty()) { if (!_privateIpAddressDao.deleteIpAddressByPod(podId)) { - throw new CloudRuntimeException("Failed to cleanup private ip addresses for pod " + podId); + throw new CloudRuntimeException(String.format("Failed to cleanup private ip addresses for pod %s", pod)); } } @@ -1655,7 +1655,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { final List localIps = _linkLocalIpAllocDao.listByPodIdDcId(podId, pod.getDataCenterId()); if (!localIps.isEmpty()) { if (!_linkLocalIpAllocDao.deleteIpAddressByPod(podId)) { - throw new CloudRuntimeException("Failed to cleanup private ip addresses for pod " + podId); + throw new CloudRuntimeException(String.format("Failed to cleanup private ip addresses for pod %s", pod)); } } @@ -1672,7 +1672,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { // Delete the pod if (!_podDao.remove(podId)) { - throw new CloudRuntimeException("Failed to delete pod " + podId); + throw new CloudRuntimeException(String.format("Failed to delete pod %s", pod)); } // remove from dedicated resources @@ -1717,7 +1717,7 @@ public Pod createPodIpRange(final CreateManagementNetworkIpRangeCmd cmd) { final Account account = CallContext.current().getCallingAccount(); if(!_accountMgr.isRootAdmin(account.getId())) { - throw new PermissionDeniedException("Cannot perform this operation, Calling account is not root admin: " + account.getId()); + throw new PermissionDeniedException(String.format("Cannot perform this operation, Calling account is not root admin: %s", account)); } final long podId = cmd.getPodId(); @@ -1765,11 +1765,11 @@ public Pod createPodIpRange(final CreateManagementNetworkIpRangeCmd cmd) { // Because each pod has only one Gateway and Netmask. 
 if (!gateway.equals(pod.getGateway())) { - throw new InvalidParameterValueException("Multiple gateways for the POD: " + pod.getId() + " are not allowed. The Gateway should be same as the existing Gateway " + pod.getGateway()); + throw new InvalidParameterValueException(String.format("Multiple gateways for the POD: %s are not allowed. The Gateway should be same as the existing Gateway %s", pod, pod.getGateway())); } if (!netmask.equals(NetUtils.getCidrNetmask(cidrSize))) { - throw new InvalidParameterValueException("Multiple subnets for the POD: " + pod.getId() + " are not allowed. The Netmask should be same as the existing Netmask " + NetUtils.getCidrNetmask(cidrSize)); + throw new InvalidParameterValueException(String.format("Multiple subnets for the POD: %s are not allowed. The Netmask should be same as the existing Netmask %s", pod, NetUtils.getCidrNetmask(cidrSize))); } // Check if the IP range is valid. @@ -1828,7 +1828,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { lock = _podDao.acquireInLockTable(podId); if (lock == null) { - String msg = "Unable to acquire lock on table to update the ip range of POD: " + pod.getName() + ", Creation failed."; + String msg = String.format("Unable to acquire lock on table to update the ip range of POD: %s, Creation failed.", pod); logger.warn(msg); throw new CloudRuntimeException(msg); } @@ -1916,7 +1916,7 @@ public void deletePodIpRange(final DeleteManagementNetworkIpRangeCmd cmd) throws } if(!foundRange) { - throw new InvalidParameterValueException("The input IP range: " + startIp + "-" + endIp + " of pod: " + podId + "is not present. Please input an existing range."); + throw new InvalidParameterValueException(String.format("The input IP range: %s-%s of pod: %s is not present. 
Please input an existing range.", startIp, endIp, pod)); } final StringBuilder newPodIpRange = new StringBuilder(); @@ -1941,7 +1941,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { lock = _podDao.acquireInLockTable(podId); if (lock == null) { - String msg = "Unable to acquire lock on table to update the ip range of POD: " + pod.getName() + ", Deletion failed."; + String msg = String.format("Unable to acquire lock on table to update the ip range of POD: %s, Deletion failed.", pod); logger.warn(msg); throw new CloudRuntimeException(msg); } @@ -1955,14 +1955,14 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { for(long ipAddr = NetUtils.ip2Long(startIp); ipAddr <= NetUtils.ip2Long(endIp); ipAddr++) { if (!_privateIpAddressDao.deleteIpAddressByPodDc(NetUtils.long2Ip(ipAddr), podId, pod.getDataCenterId())) { - throw new CloudRuntimeException("Failed to cleanup private ip address: " + NetUtils.long2Ip(ipAddr) + " of Pod: " + podId + " DC: " + pod.getDataCenterId()); + throw new CloudRuntimeException(String.format("Failed to cleanup private ip address: %s of Pod: %s DC: %s", NetUtils.long2Ip(ipAddr), pod, _zoneDao.findById(pod.getDataCenterId()))); } } } }); } catch (final Exception e) { - logger.error("Unable to delete Pod " + podId + "IP range due to " + e.getMessage(), e); - throw new CloudRuntimeException("Failed to delete Pod " + podId + "IP range. Please contact Cloud Support."); + logger.error("Unable to delete Pod {} IP range due to {}", pod, e.getMessage(), e); + throw new CloudRuntimeException(String.format("Failed to delete Pod %s IP range. 
Please contact Cloud Support.", pod)); } messageBus.publish(_name, MESSAGE_DELETE_POD_IP_RANGE_EVENT, PublishScope.LOCAL, pod); @@ -1996,7 +1996,7 @@ public void updatePodIpRange(final UpdatePodManagementNetworkIpRangeCmd cmd) thr final String[] existingPodIpRanges = pod.getDescription().split(","); if (existingPodIpRanges.length == 0) { - throw new InvalidParameterValueException("The IP range cannot be found in the pod: " + podId + " since the existing IP range is empty."); + throw new InvalidParameterValueException(String.format("The IP range cannot be found in the pod: %s since the existing IP range is empty.", pod)); } verifyIpRangeParameters(currentStartIP,currentEndIP); @@ -2023,8 +2023,8 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { } }); } catch (final Exception e) { - logger.error("Unable to update Pod " + podId + " IP range due to " + e.getMessage(), e); - throw new CloudRuntimeException("Failed to update Pod " + podId + " IP range. Please contact Cloud Support."); + logger.error("Unable to update Pod {} IP range due to {}", pod, e.getMessage(), e); + throw new CloudRuntimeException(String.format("Failed to update Pod %s IP range. 
Please contact Cloud Support.", pod)); } } @@ -2062,7 +2062,7 @@ private void updatePodIpRangeInDb (long zoneId, long podId, Integer vlanId, Host try { lock = _podDao.acquireInLockTable(podId); if (lock == null) { - String msg = "Unable to acquire lock on table to update the ip range of POD: " + pod.getName() + ", Update failed."; + String msg = String.format("Unable to acquire lock on table to update the ip range of POD: %s, Update failed.", pod); logger.warn(msg); throw new CloudRuntimeException(msg); } @@ -2077,15 +2077,15 @@ private void updatePodIpRangeInDb (long zoneId, long podId, Integer vlanId, Host if (currentIpRange.size() > 0) { for (Long startIP: currentIpRange) { if (!_privateIpAddressDao.deleteIpAddressByPodDc(NetUtils.long2Ip(startIP),podId,zoneId)) { - throw new CloudRuntimeException("Failed to remove private ip address: " + NetUtils.long2Ip(startIP) + " of Pod: " + podId + " DC: " + pod.getDataCenterId()); + throw new CloudRuntimeException(String.format("Failed to remove private ip address: %s of Pod: %s DC: %s", NetUtils.long2Ip(startIP), pod, _zoneDao.findById(pod.getDataCenterId()))); } } } } _podDao.update(podId, pod); } catch (final Exception e) { - logger.error("Unable to update Pod " + podId + " IP range due to database error " + e.getMessage(), e); - throw new CloudRuntimeException("Failed to update Pod " + podId + " IP range. Please contact Cloud Support."); + logger.error("Unable to update Pod {} IP range due to database error {}", pod, e.getMessage(), e); + throw new CloudRuntimeException(String.format("Failed to update Pod %s IP range. 
Please contact Cloud Support.", pod)); } finally { if (lock != null) { _podDao.releaseFromLockTable(podId); @@ -2172,7 +2172,7 @@ public DataCenterGuestIpv6Prefix doInTransaction(TransactionStatus status) { }); } catch (final Exception e) { logger.error(String.format("Unable to add IPv6 prefix for zone: %s due to %s", zone, e.getMessage()), e); - throw new CloudRuntimeException(String.format("Unable to add IPv6 prefix for zone ID: %s. Please contact Cloud Support.", zone.getUuid())); + throw new CloudRuntimeException(String.format("Unable to add IPv6 prefix for zone ID: %s. Please contact Cloud Support.", zone)); } return dataCenterGuestIpv6Prefix; } @@ -2360,7 +2360,7 @@ public Pod createPod(final long zoneId, final String name, final String startIp, final Account account = CallContext.current().getCallingAccount(); if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(account.getId())) { - throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId); + throw new PermissionDeniedException(String.format("Cannot perform this operation, Zone is currently disabled: %s", zone)); } String cidr = null; @@ -2724,11 +2724,6 @@ public DataCenter editZone(final UpdateZoneCmd cmd) { throw new InvalidParameterValueException("Please enter a valid guest cidr"); } - // Make sure the zone exists - if (!validZone(zoneId)) { - throw new InvalidParameterValueException("A zone with ID: " + zoneId + " does not exist."); - } - final String oldZoneName = zone.getName(); if (zoneName == null) { @@ -2834,8 +2829,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { _networkSvc.addTrafficTypeToPhysicalNetwork(mgmtPhyNetwork.getId(), TrafficType.Storage.toString(), "vlan", mgmtTraffic.getXenNetworkLabel(), mgmtTraffic.getKvmNetworkLabel(), mgmtTraffic.getVmwareNetworkLabel(), mgmtTraffic.getSimulatorNetworkLabel(), mgmtTraffic.getVlan(), mgmtTraffic.getHypervNetworkLabel(), 
mgmtTraffic.getOvm3NetworkLabel()); - logger.info("No storage traffic type was specified by admin, create default storage traffic on physical network " + mgmtPhyNetwork.getId() - + " with same configure of management traffic type"); + logger.info("No storage traffic type was specified by admin, create default storage traffic on physical network {} with same configure of management traffic type", mgmtPhyNetwork); } } catch (final InvalidParameterValueException ex) { throw new InvalidParameterValueException("Cannot enable this Zone since: " + ex.getMessage()); @@ -2859,7 +2853,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { if (resource != null) { resourceId = resource.getId(); if (!_dedicatedDao.remove(resourceId)) { - throw new CloudRuntimeException("Failed to delete dedicated Zone Resource " + resourceId); + throw new CloudRuntimeException(String.format("Failed to delete dedicated Zone Resource %s", resource)); } // find the group associated and check if there are any more // resources under that group @@ -3281,18 +3275,18 @@ protected ServiceOfferingVO createServiceOffering(final long userId, final boole final Account account = _accountDao.findById(user.getAccountId()); if (account.getType() == Account.Type.DOMAIN_ADMIN) { if (filteredDomainIds.isEmpty()) { - throw new InvalidParameterValueException(String.format("Unable to create public service offering by admin: %s because it is domain-admin", user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to create public service offering by admin: %s because it is domain-admin", user)); } if (!org.apache.commons.lang3.StringUtils.isAllBlank(tags, hostTag) && !ALLOW_DOMAIN_ADMINS_TO_CREATE_TAGGED_OFFERINGS.valueIn(account.getAccountId())) { - throw new InvalidParameterValueException(String.format("User [%s] is unable to create service offerings with storage tags or host tags.", user.getUuid())); + throw new InvalidParameterValueException(String.format("User 
[%s] is unable to create service offerings with storage tags or host tags.", user)); } for (Long domainId : filteredDomainIds) { if (!_domainDao.isChildDomain(account.getDomainId(), domainId)) { - throw new InvalidParameterValueException(String.format("Unable to create service offering by another domain-admin: %s for domain: %s", user.getUuid(), _entityMgr.findById(Domain.class, domainId).getUuid())); + throw new InvalidParameterValueException(String.format("Unable to create service offering by another domain-admin: %s for domain: %s", user, _entityMgr.findById(Domain.class, domainId).getUuid())); } } } else if (account.getType() != Account.Type.ADMIN) { - throw new InvalidParameterValueException(String.format("Unable to create service offering by user: %s because it is not root-admin or domain-admin", user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to create service offering by user: %s because it is not root-admin or domain-admin", user)); } final ProvisioningType typedProvisioningType = ProvisioningType.getProvisioningType(provisioningType); @@ -3606,23 +3600,23 @@ public ServiceOffering updateServiceOffering(final UpdateServiceOfferingCmd cmd) if (account.getType() == Account.Type.DOMAIN_ADMIN) { if (!filteredZoneIds.equals(existingZoneIds)) { // Domain-admins cannot update zone(s) for offerings - throw new InvalidParameterValueException(String.format("Unable to update zone(s) for service offering: %s by admin: %s as it is domain-admin", offeringHandle.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to update zone(s) for service offering: %s by admin: %s as it is domain-admin", offeringHandle, user)); } if (existingDomainIds.isEmpty()) { - throw new InvalidParameterValueException(String.format("Unable to update public service offering: %s by user: %s because it is domain-admin", offeringHandle.getUuid(), user.getUuid())); + throw new 
InvalidParameterValueException(String.format("Unable to update public service offering: %s by user: %s because it is domain-admin", offeringHandle, user)); } else { if (filteredDomainIds.isEmpty()) { - throw new InvalidParameterValueException(String.format("Unable to update service offering: %s to a public offering by user: %s because it is domain-admin", offeringHandle.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to update service offering: %s to a public offering by user: %s because it is domain-admin", offeringHandle, user)); } } if (!org.apache.commons.lang3.StringUtils.isAllBlank(hostTags, storageTags) && !ALLOW_DOMAIN_ADMINS_TO_CREATE_TAGGED_OFFERINGS.valueIn(account.getAccountId())) { - throw new InvalidParameterValueException(String.format("User [%s] is unable to update storage tags or host tags.", user.getUuid())); + throw new InvalidParameterValueException(String.format("User [%s] is unable to update storage tags or host tags.", user)); } List nonChildDomains = new ArrayList<>(); for (Long domainId : existingDomainIds) { if (!_domainDao.isChildDomain(account.getDomainId(), domainId)) { if (name != null || displayText != null || sortKey != null) { // Domain-admins cannot update name, display text, sort key for offerings with domain which are not child domains for domain-admin - throw new InvalidParameterValueException(String.format("Unable to update service offering: %s as it has linked domain(s) which are not child domain for domain-admin: %s", offeringHandle.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to update service offering: %s as it has linked domain(s) which are not child domain for domain-admin: %s", offeringHandle, user)); } nonChildDomains.add(domainId); } @@ -3630,12 +3624,12 @@ public ServiceOffering updateServiceOffering(final UpdateServiceOfferingCmd cmd) for (Long domainId : filteredDomainIds) { if 
(!_domainDao.isChildDomain(account.getDomainId(), domainId)) { Domain domain = _entityMgr.findById(Domain.class, domainId); - throw new InvalidParameterValueException(String.format("Unable to update service offering: %s by domain-admin: %s with domain: %3$s which is not a child domain", offeringHandle.getUuid(), user.getUuid(), domain.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to update service offering: %s by domain-admin: %s with domain: %s which is not a child domain", offeringHandle, user, domain)); } } filteredDomainIds.addAll(nonChildDomains); // Final list must include domains which were not child domain for domain-admin but specified for this offering prior to update } else if (account.getType() != Account.Type.ADMIN) { - throw new InvalidParameterValueException(String.format("Unable to update service offering: %s by id user: %s because it is not root-admin or domain-admin", offeringHandle.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to update service offering: %s by id user: %s because it is not root-admin or domain-admin", offeringHandle, user)); } final boolean updateNeeded = name != null || displayText != null || sortKey != null || storageTags != null || hostTags != null || state != null; @@ -3795,18 +3789,18 @@ protected DiskOfferingVO createDiskOffering(final Long userId, final List final Account account = _accountDao.findById(user.getAccountId()); if (account.getType() == Account.Type.DOMAIN_ADMIN) { if (filteredDomainIds.isEmpty()) { - throw new InvalidParameterValueException(String.format("Unable to create public disk offering by admin: %s because it is domain-admin", user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to create public disk offering by admin: %s because it is domain-admin", user)); } if (StringUtils.isNotBlank(tags) && !ALLOW_DOMAIN_ADMINS_TO_CREATE_TAGGED_OFFERINGS.valueIn(account.getAccountId())) { - throw new 
InvalidParameterValueException(String.format("User [%s] is unable to create disk offerings with storage tags.", user.getUuid())); + throw new InvalidParameterValueException(String.format("User [%s] is unable to create disk offerings with storage tags.", user)); } for (Long domainId : filteredDomainIds) { if (domainId == null || !_domainDao.isChildDomain(account.getDomainId(), domainId)) { - throw new InvalidParameterValueException(String.format("Unable to create disk offering by another domain-admin: %s for domain: %s", user.getUuid(), _entityMgr.findById(Domain.class, domainId).getUuid())); + throw new InvalidParameterValueException(String.format("Unable to create disk offering by another domain-admin: %s for domain: %s", user, _entityMgr.findById(Domain.class, domainId).getUuid())); } } } else if (account.getType() != Account.Type.ADMIN) { - throw new InvalidParameterValueException(String.format("Unable to create disk offering by user: %s because it is not root-admin or domain-admin", user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to create disk offering by user: %s because it is not root-admin or domain-admin", user)); } tags = com.cloud.utils.StringUtils.cleanupTags(tags); @@ -4074,7 +4068,7 @@ public DiskOffering updateDiskOffering(final UpdateDiskOfferingCmd cmd) { checkDomainAdminUpdateOfferingRestrictions(diskOfferingHandle, user, filteredZoneIds, existingZoneIds, existingDomainIds, filteredDomainIds); if (StringUtils.isNotBlank(tags) && !ALLOW_DOMAIN_ADMINS_TO_CREATE_TAGGED_OFFERINGS.valueIn(account.getAccountId())) { - throw new InvalidParameterValueException(String.format("User [%s] is unable to update disk offering tags.", user.getUuid())); + throw new InvalidParameterValueException(String.format("User [%s] is unable to update disk offering tags.", user)); } List nonChildDomains = getAccountNonChildDomains(diskOfferingHandle, account, user, cmd, existingDomainIds); @@ -4083,7 +4077,7 @@ public DiskOffering 
updateDiskOffering(final UpdateDiskOfferingCmd cmd) { filteredDomainIds.addAll(nonChildDomains); // Final list must include domains which were not child domain for domain-admin but specified for this offering prior to update } else if (account.getType() != Account.Type.ADMIN) { - throw new InvalidParameterValueException(String.format("Unable to update disk offering: %s by id user: %s because it is not root-admin or domain-admin", diskOfferingHandle.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to update disk offering: %s by id user: %s because it is not root-admin or domain-admin", diskOfferingHandle, user)); } boolean updateNeeded = shouldUpdateDiskOffering(name, displayText, sortKey, displayDiskOffering, tags, cacheMode, state) || @@ -4216,13 +4210,13 @@ protected void updateDiskOfferingDetailsZoneIds(List detai protected void checkDomainAdminUpdateOfferingRestrictions(DiskOffering diskOffering, User user, List filteredZoneIds, List existingZoneIds, List existingDomainIds, List filteredDomainIds) { if (!filteredZoneIds.equals(existingZoneIds)) { - throw new InvalidParameterValueException(String.format("Unable to update zone(s) for disk offering [%s] by admin [%s] as it is domain-admin.", diskOffering.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to update zone(s) for disk offering [%s] by admin [%s] as it is domain-admin.", diskOffering, user)); } if (existingDomainIds.isEmpty()) { - throw new InvalidParameterValueException(String.format("Unable to update public disk offering [%s] by user [%s] because it is domain-admin.", diskOffering.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to update public disk offering [%s] by user [%s] because it is domain-admin.", diskOffering, user)); } if (filteredDomainIds.isEmpty()) { - throw new InvalidParameterValueException(String.format("Unable to update disk offering [%s] to a public 
offering by user [%s] because it is domain-admin.", diskOffering.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to update disk offering [%s] to a public offering by user [%s] because it is domain-admin.", diskOffering, user)); } } @@ -4325,7 +4319,7 @@ protected void updateServiceOfferingHostTagsIfNotNull(String hostTags, ServiceOf } throw new InvalidParameterValueException(String.format("There are active VMs using offering [%s], and the hosts [%s] don't have the new tags", - offering.getId(), hosts)); + offering, hosts)); } } offering.setHostTag(hostTags); @@ -4374,15 +4368,15 @@ public boolean deleteDiskOffering(final DeleteDiskOfferingCmd cmd) { if (account.getType() == Account.Type.DOMAIN_ADMIN) { List existingDomainIds = diskOfferingDetailsDao.findDomainIds(diskOfferingId); if (existingDomainIds.isEmpty()) { - throw new InvalidParameterValueException(String.format("Unable to delete public disk offering: %s by admin: %s because it is domain-admin", offering.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to delete public disk offering: %s by admin: %s because it is domain-admin", offering, user)); } for (Long domainId : existingDomainIds) { if (!_domainDao.isChildDomain(account.getDomainId(), domainId)) { - throw new InvalidParameterValueException(String.format("Unable to delete disk offering: %s as it has linked domain(s) which are not child domain for domain-admin: %s", offering.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to delete disk offering: %s as it has linked domain(s) which are not child domain for domain-admin: %s", offering, user)); } } } else if (account.getType() != Account.Type.ADMIN) { - throw new InvalidParameterValueException(String.format("Unable to delete disk offering: %s by user: %s because it is not root-admin or domain-admin", offering.getUuid(), user.getUuid())); + throw new 
InvalidParameterValueException(String.format("Unable to delete disk offering: %s by user: %s because it is not root-admin or domain-admin", offering, user)); } annotationDao.removeByEntityType(AnnotationService.EntityType.DISK_OFFERING.name(), offering.getUuid()); @@ -4433,7 +4427,7 @@ public boolean deleteServiceOffering(final DeleteServiceOfferingCmd cmd) { // Verify disk offering id mapped to the service offering final DiskOfferingVO diskOffering = _diskOfferingDao.findById(offering.getDiskOfferingId()); if (diskOffering == null) { - throw new InvalidParameterValueException("unable to find disk offering " + offering.getDiskOfferingId() + " mapped to the service offering " + offeringId); + throw new InvalidParameterValueException("unable to find disk offering " + offering.getDiskOfferingId() + " mapped to the service offering " + offering); } if (offering.getDefaultUse()) { @@ -4448,22 +4442,22 @@ public boolean deleteServiceOffering(final DeleteServiceOfferingCmd cmd) { if (account.getType() == Account.Type.DOMAIN_ADMIN) { List existingDomainIds = _serviceOfferingDetailsDao.findDomainIds(offeringId); if (existingDomainIds.isEmpty()) { - throw new InvalidParameterValueException(String.format("Unable to delete public service offering: %s by admin: %s because it is domain-admin", offering.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to delete public service offering: %s by admin: %s because it is domain-admin", offering, user)); } for (Long domainId : existingDomainIds) { if (!_domainDao.isChildDomain(account.getDomainId(), domainId)) { - throw new InvalidParameterValueException(String.format("Unable to delete service offering: %s as it has linked domain(s) which are not child domain for domain-admin: %s", offering.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to delete service offering: %s as it has linked domain(s) which are not child domain for domain-admin: %s", 
offering, user)); } } } else if (account.getType() != Account.Type.ADMIN) { - throw new InvalidParameterValueException(String.format("Unable to delete service offering: %s by user: %s because it is not root-admin or domain-admin", offering.getUuid(), user.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to delete service offering: %s by user: %s because it is not root-admin or domain-admin", offering, user)); } annotationDao.removeByEntityType(AnnotationService.EntityType.SERVICE_OFFERING.name(), offering.getUuid()); if (diskOffering.isComputeOnly()) { diskOffering.setState(DiskOffering.State.Inactive); if (!_diskOfferingDao.update(diskOffering.getId(), diskOffering)) { - throw new CloudRuntimeException(String.format("Unable to delete disk offering %s mapped to the service offering %s", diskOffering.getUuid(), offering.getUuid())); + throw new CloudRuntimeException(String.format("Unable to delete disk offering %s mapped to the service offering %s", diskOffering, offering)); } } offering.setState(ServiceOffering.State.Inactive); @@ -4625,7 +4619,7 @@ public Vlan createVlanAndPublicIpRange(final CreateVlanIpRangeCmd cmd) throws In final Account caller = CallContext.current().getCallingAccount(); if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getId())) { - throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId); + throw new PermissionDeniedException(String.format("Cannot perform this operation, Zone is currently disabled: %s", zone)); } if (zone.isSecurityGroupEnabled() && zone.getNetworkType() != DataCenter.NetworkType.Basic && forVirtualNetwork) { @@ -4640,7 +4634,7 @@ public Vlan createVlanAndPublicIpRange(final CreateVlanIpRangeCmd cmd) throws In networkId = _networkModel.getSystemNetworkByZoneAndTrafficType(zoneId, TrafficType.Public).getId(); network = _networkModel.getNetwork(networkId); } else if (network.getGuestType() != 
null || network.getTrafficType() != TrafficType.Public) { - throw new InvalidParameterValueException("Can't find Public network by id=" + networkId); + throw new InvalidParameterValueException(String.format("Can't find Public network %s", network)); } } else { if (network == null) { @@ -4658,7 +4652,7 @@ public Vlan createVlanAndPublicIpRange(final CreateVlanIpRangeCmd cmd) throws In } else if (network.getGuestType() == null || network.getGuestType() == Network.GuestType.Isolated && _ntwkOffServiceMapDao.areServicesSupportedByNetworkOffering(network.getNetworkOfferingId(), Service.SourceNat)) { - throw new InvalidParameterValueException("Can't create direct vlan for network id=" + networkId + " with type: " + network.getGuestType()); + throw new InvalidParameterValueException(String.format("Can't create direct vlan for network %s with type: %s", network, network.getGuestType())); } } @@ -4671,8 +4665,7 @@ public Vlan createVlanAndPublicIpRange(final CreateVlanIpRangeCmd cmd) throws In if (zone.getNetworkType() == DataCenter.NetworkType.Advanced) { if (network.getTrafficType() == TrafficType.Guest) { if (network.getGuestType() != GuestType.Shared) { - throw new InvalidParameterValueException("Can execute createVLANIpRanges on shared guest network, but type of this guest network " + network.getId() + " is " - + network.getGuestType()); + throw new InvalidParameterValueException(String.format("Can execute createVLANIpRanges on shared guest network, but type of this guest network %s is %s", network, network.getGuestType())); } final List vlans = _vlanDao.listVlansByNetworkId(network.getId()); @@ -4681,8 +4674,7 @@ public Vlan createVlanAndPublicIpRange(final CreateVlanIpRangeCmd cmd) throws In if (vlanId == null || vlanId.contains(Vlan.UNTAGGED)) { vlanId = vlan.getVlanTag(); } else if (!NetUtils.isSameIsolationId(vlan.getVlanTag(), vlanId)) { - throw new InvalidParameterValueException("there is already one vlan " + vlan.getVlanTag() + " on network :" + 
+network.getId() - + ", only one vlan is allowed on guest network"); + throw new InvalidParameterValueException(String.format("there is already one vlan %s on network :%s, only one vlan is allowed on guest network", vlan.getVlanTag(), network)); } } sameSubnet = validateIpRange(startIP, endIP, newVlanGateway, newVlanNetmask, vlans, ipv4, ipv6, ip6Gateway, ip6Cidr, startIPv6, endIPv6, network); @@ -4739,7 +4731,7 @@ public Vlan doInTransaction(final TransactionStatus status) { if (supportsMultipleSubnets == null || !Boolean.valueOf(supportsMultipleSubnets)) { throw new InvalidParameterValueException("The dhcp service provider for this network does not support dhcp across multiple subnets"); } - logger.info("adding a new subnet to the network " + network.getId()); + logger.info("adding a new subnet to the network {}", network); } else if (sameSubnet != null) { // if it is same subnet the user might not send the vlan and the // netmask details. so we are @@ -4913,7 +4905,7 @@ public Vlan createVlanAndPublicIpRange(final long zoneId, final long networkId, throw new InvalidParameterValueException("Please specify a valid pod."); } if (pod.getDataCenterId() != zoneId) { - throw new InvalidParameterValueException("Pod id=" + podId + " doesn't belong to zone id=" + zoneId); + throw new InvalidParameterValueException(String.format("Pod %s doesn't belong to zone id=%d", pod, zoneId)); } // pod vlans can be created in basic zone only if (zone.getNetworkType() != NetworkType.Basic || network.getTrafficType() != TrafficType.Guest) { @@ -5306,7 +5298,7 @@ private void updateVlanAndIpv4Range(final long id, final VlanVO vlanRange, } if (logger.isDebugEnabled()) { - logger.debug("lock vlan " + id + " is acquired"); + logger.debug("lock on vlan {} is acquired", range); } commitUpdateVlanAndIpRange(id, newStartIP, newEndIP, currentStartIP, currentEndIP, gateway, netmask,true, isRangeForSystemVM, forSystemVms); @@ -5364,7 +5356,7 @@ private void updateVlanAndIpv6Range(final long id, 
final VlanVO vlanRange, } if (logger.isDebugEnabled()) { - logger.debug("lock vlan " + id + " is acquired"); + logger.debug("lock on vlan {} is acquired", range); } commitUpdateVlanAndIpRange(id, startIpv6, endIpv6, currentStartIPv6, currentEndIPv6, ip6Gateway, ip6Cidr, false, isRangeForSystemVM,forSystemVms); @@ -5385,7 +5377,7 @@ private VlanVO commitUpdateVlanAndIpRange(final Long id, final String newStartIP @Override public VlanVO doInTransaction(final TransactionStatus status) { VlanVO vlanRange = _vlanDao.findById(id); - logger.debug("Updating vlan range " + vlanRange.getId()); + logger.debug("Updating vlan range {}", vlanRange); if (ipv4) { vlanRange.setIpRange(newStartIP + "-" + newEndIP); vlanRange.setVlanGateway(gateway); @@ -5496,32 +5488,28 @@ public VlanVO deleteVlanAndPublicIpRange(final long userId, final long vlanDbId, } if (logger.isDebugEnabled()) { - logger.debug("lock vlan " + vlanDbId + " is acquired"); + logger.debug("lock on vlan {} is acquired", vlanRange); } for (final IPAddressVO ip : ips) { boolean success = true; if (ip.isOneToOneNat()) { - throw new InvalidParameterValueException("Can't delete account specific vlan " + vlanDbId + " as ip " + ip - + " belonging to the range is used for static nat purposes. Cleanup the rules first"); + throw new InvalidParameterValueException(String.format("Can't delete account specific vlan %s as ip %s belonging to the range is used for static nat purposes. Cleanup the rules first", vlanRange, ip)); } if (ip.isSourceNat()) { - throw new InvalidParameterValueException("Can't delete account specific vlan " + vlanDbId + " as ip " + ip - + " belonging to the range is a source nat ip for the network id=" + ip.getSourceNetworkId() - + ". IP range with the source nat ip address can be removed either as a part of Network, or account removal"); + throw new InvalidParameterValueException(String.format("Can't delete account specific vlan %s as ip %s belonging to the range is a source nat ip for the network id=%d. 
IP range with the source nat ip address can be removed either as a part of Network, or account removal", vlanRange, ip, ip.getSourceNetworkId())); } if (_firewallDao.countRulesByIpId(ip.getId()) > 0) { - throw new InvalidParameterValueException("Can't delete account specific vlan " + vlanDbId + " as ip " + ip - + " belonging to the range has firewall rules applied. Cleanup the rules first"); + throw new InvalidParameterValueException(String.format("Can't delete account specific vlan %s as ip %s belonging to the range has firewall rules applied. Cleanup the rules first", vlanRange, ip)); } if (ip.getAllocatedTime() != null) { // This means IP is allocated // release public ip address here - success = _ipAddrMgr.disassociatePublicIpAddress(ip.getId(), userId, caller); + success = _ipAddrMgr.disassociatePublicIpAddress(ip, userId, caller); } if (!success) { - logger.warn("Some ip addresses failed to be released as a part of vlan " + vlanDbId + " removal"); + logger.warn("Some ip addresses failed to be released as a part of vlan {} removal", vlanRange); } else { resourceCountToBeDecrement++; final boolean usageHidden = _ipAddrMgr.isUsageHidden(ip); @@ -5539,8 +5527,7 @@ public VlanVO deleteVlanAndPublicIpRange(final long userId, final long vlanDbId, final NicIpAliasVO ipAlias = _nicIpAliasDao.findByGatewayAndNetworkIdAndState(vlanRange.getVlanGateway(), vlanRange.getNetworkId(), NicIpAlias.State.active); //check if the ipalias belongs to the vlan range being deleted. if (ipAlias != null && vlanDbId == _publicIpAddressDao.findByIpAndSourceNetworkId(vlanRange.getNetworkId(), ipAlias.getIp4Address()).getVlanId()) { - throw new InvalidParameterValueException("Cannot delete vlan range " + vlanDbId + " as " + ipAlias.getIp4Address() - + "is being used for providing dhcp service in this subnet. 
Delete all VMs in this subnet and try again"); + throw new InvalidParameterValueException(String.format("Cannot delete vlan range %s as %s is being used for providing dhcp service in this subnet. Delete all VMs in this subnet and try again", vlanRange, ipAlias.getIp4Address())); } final long allocIpCount = _publicIpAddressDao.countIPs(vlanRange.getDataCenterId(), vlanDbId, true); if (allocIpCount > 0) { @@ -5552,21 +5539,22 @@ public VlanVO deleteVlanAndPublicIpRange(final long userId, final long vlanDbId, throw new InvalidParameterValueException(String.format("%d IPv6 addresses are in use. Cannot delete this vlan", ipAddresses.size())); } + VlanVO finalVlanRange = vlanRange; Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(final TransactionStatus status) { _publicIpAddressDao.deletePublicIPRange(vlanDbId); - logger.debug(String.format("Delete Public IP Range (from user_ip_address, where vlan_db_id=%s)", vlanDbId)); + logger.debug("Delete Public IP Range (from user_ip_address, where vlan_db_id={})", vlanDbId); _vlanDao.remove(vlanDbId); - logger.debug(String.format("Mark vlan as Remove vlan (vlan_db_id=%s)", vlanDbId)); + logger.debug("Mark vlan as Remove vlan (vlan_db_id={})", vlanDbId); SearchBuilder sb = podVlanMapDao.createSearchBuilder(); sb.and("vlan_db_id", sb.entity().getVlanDbId(), SearchCriteria.Op.EQ); SearchCriteria sc = sb.create(); sc.setParameters("vlan_db_id", vlanDbId); podVlanMapDao.remove(sc); - logger.debug(String.format("Delete vlan_db_id=%s in pod_vlan_map", vlanDbId)); + logger.debug("Delete vlan_db_id={} in pod_vlan_map", vlanDbId); } }); @@ -5701,16 +5689,16 @@ public boolean releasePublicIpRange(final ReleasePublicIpRangeCmd cmd) { throw new InvalidParameterValueException("Please specify a valid IP range id."); } - return releasePublicIpRange(vlanDbId, CallContext.current().getCallingUserId(), CallContext.current().getCallingAccount()); + return releasePublicIpRange(vlanDbId, 
CallContext.current().getCallingUser(), CallContext.current().getCallingAccount()); } @DB - public boolean releasePublicIpRange(final long vlanDbId, final long userId, final Account caller) { + public boolean releasePublicIpRange(final long vlanDbId, final User user, final Account caller) { VlanVO vlan = _vlanDao.findById(vlanDbId); if(vlan == null) { // Nothing to do if vlan can't be found - logger.warn(String.format("Skipping the process for releasing public IP range as could not find a VLAN with ID '%s' for Account '%s' and User '%s'." - ,vlanDbId, caller, userId)); + logger.warn("Skipping the process for releasing public IP range as could not find a VLAN with ID '{}' for Account '{}' and User '{}'.", + vlanDbId, caller, user); return true; } @@ -5745,21 +5733,21 @@ public boolean releasePublicIpRange(final long vlanDbId, final long userId, fina throw new CloudRuntimeException("Unable to acquire vlan configuration: " + vlanDbId); } if (logger.isDebugEnabled()) { - logger.debug("lock vlan " + vlanDbId + " is acquired"); + logger.debug("lock on vlan {} is acquired", vlan); } for (final IPAddressVO ip : ips) { // Disassociate allocated IP's that are not in use if (!ip.isOneToOneNat() && !ip.isSourceNat() && !(_firewallDao.countRulesByIpId(ip.getId()) > 0)) { if (logger.isDebugEnabled()) { - logger.debug("Releasing Public IP addresses" + ip + " of vlan " + vlanDbId + " as part of Public IP" + " range release to the system pool"); + logger.debug("Releasing Public IP addresses {} of vlan {} as part of Public IP range release to the system pool", ip, vlan); } - success = success && _ipAddrMgr.disassociatePublicIpAddress(ip.getId(), userId, caller); + success = success && _ipAddrMgr.disassociatePublicIpAddress(ip, user.getId(), caller); } else { ipsInUse.add(ip); } } if (!success) { - logger.warn("Some Public IP addresses that were not in use failed to be released as a part of" + " vlan " + vlanDbId + "release to the system pool"); + logger.warn("Some Public IP 
addresses that were not in use failed to be released as a part of vlan {} release to the system pool", vlan); } } finally { _vlanDao.releaseFromLockTable(vlanDbId); @@ -6093,16 +6081,16 @@ public void checkDiskOfferingAccess(final Account caller, final DiskOffering dof for (final SecurityChecker checker : _secChecker) { if (checker.checkAccess(caller, dof, zone)) { if (logger.isDebugEnabled()) { - logger.debug("Access granted to " + caller + " to disk offering:" + dof.getId() + " by " + checker.getName()); + logger.debug("Access granted to {} to disk offering: {} by {}", caller, dof, checker.getName()); } return; } else { - throw new PermissionDeniedException(String.format("Access denied to %s for disk offering: %s, zone: %s by %s", caller, dof.getUuid(), zone.getUuid(), checker.getName())); + throw new PermissionDeniedException(String.format("Access denied to %s for disk offering: %s, zone: %s by %s", caller, dof, zone, checker.getName())); } } assert false : "How can all of the security checkers pass on checking this caller?"; - throw new PermissionDeniedException("There's no way to confirm " + caller + " has access to disk offering:" + dof.getId()); + throw new PermissionDeniedException(String.format("There's no way to confirm %s has access to disk offering:%s", caller, dof)); } @Override @@ -6110,16 +6098,16 @@ public void checkZoneAccess(final Account caller, final DataCenter zone) { for (final SecurityChecker checker : _secChecker) { if (checker.checkAccess(caller, zone)) { if (logger.isDebugEnabled()) { - logger.debug("Access granted to " + caller + " to zone:" + zone.getId() + " by " + checker.getName()); + logger.debug("Access granted to {} to zone:{} by {}", caller, zone, checker.getName()); } return; } else { - throw new PermissionDeniedException("Access denied to " + caller + " by " + checker.getName() + " for zone " + zone.getId()); + throw new PermissionDeniedException(String.format("Access denied to %s by %s for zone %s", caller, checker.getName(), 
zone)); } } assert false : "How can all of the security checkers pass on checking this caller?"; - throw new PermissionDeniedException("There's no way to confirm " + caller + " has access to zone:" + zone.getId()); + throw new PermissionDeniedException(String.format("There's no way to confirm %s has access to zone:%s", caller, zone)); } @Override @@ -7338,8 +7326,7 @@ public boolean deleteNetworkOffering(final DeleteNetworkOfferingCmd cmd) { // though) final int networkCount = _networkDao.getNetworkCountByNetworkOffId(offeringId); if (networkCount > 0) { - throw new InvalidParameterValueException("Can't delete network offering " + offeringId + " as its used by " + networkCount + " networks. " - + "To make the network offering unavailable, disable it"); + throw new InvalidParameterValueException(String.format("Can't delete network offering %s as it's used by %d networks. To make the network offering unavailable, disable it", offering, networkCount)); } annotationDao.removeByEntityType(AnnotationService.EntityType.NETWORK_OFFERING.name(), offering.getUuid()); @@ -7455,7 +7442,7 @@ public NetworkOffering updateNetworkOffering(final UpdateNetworkOfferingCmd cmd) if (oldTags != null) { long oldPhysicalNetworkId = _networkModel.findPhysicalNetworkId(zoneId, oldTags, trafficType); if (newPhysicalNetworkId != oldPhysicalNetworkId) { - throw new InvalidParameterValueException("New tags: selects different physical network for zone " + zoneId); + throw new InvalidParameterValueException(String.format("New tags: selects different physical network for zone %s", dataCenter)); } } } @@ -7563,8 +7550,10 @@ public AccountVO markDefaultZone(final String accountName, final long domainId, // Check if the account exists final Account account = _accountDao.findEnabledAccount(accountName, domainId); if (account == null) { - logger.error("Unable to find account by name: " + accountName + " in domain " + domainId); - throw new InvalidParameterValueException("Account by name: " + accountName 
+ " doesn't exist in domain " + domainId); + DomainVO domain = _domainDao.findById(domainId); + String domainStr = domain == null ? String.valueOf(domainId) : domain.toString(); + logger.error("Unable to find account by name: {} in domain {}", accountName, domainStr); + throw new InvalidParameterValueException(String.format("Account by name: %s doesn't exist in domain %s", accountName, domainStr)); } // Don't allow modification of system account @@ -7678,16 +7667,16 @@ public List listNetworkOfferings(final TrafficType tr @Override @DB - public boolean releaseDomainSpecificVirtualRanges(final long domainId) { - final List maps = _domainVlanMapDao.listDomainVlanMapsByDomain(domainId); + public boolean releaseDomainSpecificVirtualRanges(final Domain domain) { + final List maps = _domainVlanMapDao.listDomainVlanMapsByDomain(domain.getId()); if (CollectionUtils.isNotEmpty(maps)) { try { Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(final TransactionStatus status) { for (DomainVlanMapVO map : maps) { - if (!releasePublicIpRange(map.getVlanDbId(), _accountMgr.getSystemUser().getId(), _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM))) { - throw new CloudRuntimeException("Failed to release domain specific virtual ip ranges for domain id=" + domainId); + if (!releasePublicIpRange(map.getVlanDbId(), _accountMgr.getSystemUser(), _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM))) { + throw new CloudRuntimeException(String.format("Failed to release domain specific virtual ip ranges for domain %s", domain)); } } } @@ -7697,23 +7686,23 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { return false; } } else { - logger.trace("Domain id=" + domainId + " has no domain specific virtual ip ranges, nothing to release"); + logger.trace("Domain {} has no domain specific virtual ip ranges, nothing to release", domain); } return true; } @Override @DB - public boolean 
releaseAccountSpecificVirtualRanges(final long accountId) { - final List maps = _accountVlanMapDao.listAccountVlanMapsByAccount(accountId); + public boolean releaseAccountSpecificVirtualRanges(final Account account) { + final List maps = _accountVlanMapDao.listAccountVlanMapsByAccount(account.getId()); if (maps != null && !maps.isEmpty()) { try { Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(final TransactionStatus status) { for (final AccountVlanMapVO map : maps) { - if (!releasePublicIpRange(map.getVlanDbId(), _accountMgr.getSystemUser().getId(), _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM))) { - throw new CloudRuntimeException("Failed to release account specific virtual ip ranges for account id=" + accountId); + if (!releasePublicIpRange(map.getVlanDbId(), _accountMgr.getSystemUser(), _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM))) { + throw new CloudRuntimeException(String.format("Failed to release account specific virtual ip ranges for account %s", account)); } } } @@ -7723,7 +7712,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { return false; } } else { - logger.trace("Account id=" + accountId + " has no account specific virtual ip ranges, nothing to release"); + logger.trace("Account {} has no account specific virtual ip ranges, nothing to release", account); } return true; } @@ -7834,9 +7823,9 @@ public PortableIpRange createPortableIpRange(final CreatePortableIpRangeCmd cmd) if (zones != null && !zones.isEmpty()) { for (final DataCenterVO zone : zones) { // check if there is zone vlan with same id - if (_vlanDao.findByZoneAndVlanId(zone.getId(), vlanId) != null) { - throw new InvalidParameterValueException("Found a VLAN id " + vlanId + " already existing in" + " zone " + zone.getUuid() - + " that conflicts with VLAN id of the portable ip range being configured"); + VlanVO vlanVO = _vlanDao.findByZoneAndVlanId(zone.getId(), vlanId); + if (vlanVO != null) { + 
throw new InvalidParameterValueException(String.format("Found a VLAN id %s already existing in zone %s that conflicts with VLAN id of the portable ip range being configured", vlanVO, zone)); } //check if there is a public ip range that overlaps with portable ip range being created checkOverlapPublicIpRange(zone.getId(), startIP, endIP); diff --git a/server/src/main/java/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java b/server/src/main/java/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java index 76e019df1b3e..863307035eef 100644 --- a/server/src/main/java/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java +++ b/server/src/main/java/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java @@ -21,6 +21,7 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.dc.dao.HostPodDao; import org.apache.cloudstack.consoleproxy.ConsoleAccessManager; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.security.keys.KeysManager; @@ -38,7 +39,6 @@ import com.cloud.utils.NumbersUtil; import com.cloud.utils.component.ManagerBase; import com.cloud.vm.ConsoleProxyVO; -import com.cloud.vm.UserVmVO; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineManager; @@ -52,6 +52,8 @@ public class AgentBasedConsoleProxyManager extends ManagerBase implements Consol @Inject protected HostDao _hostDao; @Inject + protected HostPodDao podDao; + @Inject protected UserVmDao _userVmDao; protected String _consoleProxyUrlDomain; @Inject @@ -140,17 +142,11 @@ HostVO findHost(VMInstanceVO vm) { } @Override - public ConsoleProxyInfo assignProxy(long dataCenterId, long userVmId) { - UserVmVO userVm = _userVmDao.findById(userVmId); - if (userVm == null) { - logger.warn("User VM " + userVmId + " no longer exists, return a null proxy for user vm:" + userVmId); - return null; - } - + public ConsoleProxyInfo assignProxy(long dataCenterId, 
VMInstanceVO userVm) { HostVO host = findHost(userVm); if (host != null) { if (logger.isDebugEnabled()) { - logger.debug("Assign embedded console proxy running at " + host.getName() + " to user vm " + userVmId + " with public IP " + host.getPublicIpAddress()); + logger.debug("Assign embedded console proxy running at {} to user vm {} with public IP {}", host, userVm, host.getPublicIpAddress()); } // only private IP, public IP, host id have meaningful values, rest @@ -172,7 +168,7 @@ public ConsoleProxyInfo assignProxy(long dataCenterId, long userVmId) { return new ConsoleProxyInfo(_sslEnabled, publicIp, _consoleProxyPort, urlPort, _consoleProxyUrlDomain); } else { - logger.warn("Host that VM is running is no longer available, console access to VM " + userVmId + " will be temporarily unavailable."); + logger.warn("Host that VM is running is no longer available, console access to VM {} will be temporarily unavailable.", userVm); } return null; } diff --git a/server/src/main/java/com/cloud/consoleproxy/AgentBasedStandaloneConsoleProxyManager.java b/server/src/main/java/com/cloud/consoleproxy/AgentBasedStandaloneConsoleProxyManager.java index 60e2265c41c9..9e4710c89afa 100644 --- a/server/src/main/java/com/cloud/consoleproxy/AgentBasedStandaloneConsoleProxyManager.java +++ b/server/src/main/java/com/cloud/consoleproxy/AgentBasedStandaloneConsoleProxyManager.java @@ -22,7 +22,7 @@ import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.info.ConsoleProxyInfo; -import com.cloud.vm.UserVmVO; +import com.cloud.vm.VMInstanceVO; /** * This class is intended to replace the use of console proxy VMs managed by the Apache CloudStack (ACS) @@ -31,12 +31,7 @@ public class AgentBasedStandaloneConsoleProxyManager extends AgentBasedConsoleProxyManager { @Override - public ConsoleProxyInfo assignProxy(long dataCenterId, long userVmId) { - UserVmVO userVm = _userVmDao.findById(userVmId); - if (userVm == null) { - logger.warn("User VM " + userVmId + " no longer exists, 
return a null proxy for user vm:" + userVmId); - return null; - } + public ConsoleProxyInfo assignProxy(long dataCenterId, VMInstanceVO userVm) { HostVO host = findHost(userVm); if (host != null) { @@ -60,21 +55,22 @@ public ConsoleProxyInfo assignProxy(long dataCenterId, long userVmId) { } if (allocatedHost == null) { if (logger.isDebugEnabled()) { - logger.debug("Failed to find a console proxy at host: " + host.getName() + " and in the pod: " + host.getPodId() + " to user vm " + userVmId); + logger.debug("Failed to find a console proxy at host: {} and in the pod: {} to user vm {}", + host::toString, () -> podDao.findById(host.getPodId()), userVm::toString); } return null; } if (logger.isDebugEnabled()) { - logger.debug("Assign standalone console proxy running at " + allocatedHost.getName() + " to user vm " + userVmId + " with public IP " - + allocatedHost.getPublicIpAddress()); + logger.debug("Assign standalone console proxy running at {} to user vm {} with public IP {}", allocatedHost, userVm, allocatedHost.getPublicIpAddress()); } // only private IP, public IP, host id have meaningful values, rest of all are place-holder values String publicIp = allocatedHost.getPublicIpAddress(); if (publicIp == null) { if (logger.isDebugEnabled()) { - logger.debug("Host " + allocatedHost.getName() + "/" + allocatedHost.getPrivateIpAddress() - + " does not have public interface, we will return its private IP for cosole proxy."); + logger.debug("Host {} (private IP address: {}) does not have public " + + "interface, we will return its private IP for console proxy.", + allocatedHost, allocatedHost.getPrivateIpAddress()); } publicIp = allocatedHost.getPrivateIpAddress(); } @@ -86,7 +82,7 @@ public ConsoleProxyInfo assignProxy(long dataCenterId, long userVmId) { return new ConsoleProxyInfo(_sslEnabled, publicIp, _consoleProxyPort, urlPort, _consoleProxyUrlDomain); } else { - logger.warn("Host that VM is running is no longer available, console access to VM " + userVmId + " will be 
temporarily unavailable."); + logger.warn("Host that VM is running is no longer available, console access to VM {} will be temporarily unavailable.", userVm); } return null; } diff --git a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java index 53f76f8ad420..3db02f917757 100644 --- a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java +++ b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java @@ -329,8 +329,8 @@ protected HostVO findConsoleProxyHost(StartupProxyCommand startupCmd) { } @Override - public ConsoleProxyInfo assignProxy(final long dataCenterId, final long vmId) { - ConsoleProxyVO proxy = doAssignProxy(dataCenterId, vmId); + public ConsoleProxyInfo assignProxy(final long dataCenterId, final VMInstanceVO userVm) { + ConsoleProxyVO proxy = doAssignProxy(dataCenterId, userVm); if (proxy == null) { return null; } @@ -355,14 +355,8 @@ public ConsoleProxyInfo assignProxy(final long dataCenterId, final long vmId) { return info; } - public ConsoleProxyVO doAssignProxy(long dataCenterId, long vmId) { + public ConsoleProxyVO doAssignProxy(long dataCenterId, VMInstanceVO vm) { ConsoleProxyVO proxy = null; - VMInstanceVO vm = vmInstanceDao.findById(vmId); - - if (vm == null) { - logger.warn("VM " + vmId + " no longer exists, return a null proxy for vm:" + vmId); - return null; - } if (!availableVmStateOnAssignProxy.contains(vm.getState())) { if (logger.isInfoEnabled()) { @@ -379,17 +373,17 @@ public ConsoleProxyVO doAssignProxy(long dataCenterId, long vmId) { if (proxy != null) { if (!isInAssignableState(proxy)) { if (logger.isInfoEnabled()) { - logger.info("A previous assigned proxy is not assignable now, reassign console proxy for user vm : " + vmId); + logger.info("A previous assigned proxy is not assignable now, reassign console proxy for user vm : {}", vm); } proxy = null; } else { if 
(consoleProxyDao.getProxyActiveLoad(proxy.getId()) < capacityPerProxy || hasPreviousSession(proxy, vm)) { if (logger.isDebugEnabled()) { - logger.debug("Assign previous allocated console proxy for user vm : " + vmId); + logger.debug("Assign previous allocated console proxy for user vm: {}", vm); } if (proxy.getActiveSession() >= capacityPerProxy) { - logger.warn("Assign overloaded proxy to user VM as previous session exists, user vm : " + vmId); + logger.warn("Assign overloaded proxy to user VM as previous session exists, user vm: {}", vm); } } else { proxy = null; @@ -405,8 +399,8 @@ public ConsoleProxyVO doAssignProxy(long dataCenterId, long vmId) { allocProxyLock.unlock(); } } else { - logger.error("Unable to acquire synchronization lock to get/allocate proxy resource for vm :" + vmId + - ". Previous console proxy allocation is taking too long"); + logger.error("Unable to acquire synchronization lock to get/allocate proxy " + + "resource for vm: {}. Previous console proxy allocation is taking too long", vm); } if (proxy == null) { @@ -415,7 +409,7 @@ public ConsoleProxyVO doAssignProxy(long dataCenterId, long vmId) { } if (vm.getProxyId() == null || vm.getProxyId() != proxy.getId()) { - vmInstanceDao.updateProxyId(vmId, proxy.getId(), DateUtil.currentGMTTime()); + vmInstanceDao.updateProxyId(vm.getId(), proxy.getId(), DateUtil.currentGMTTime()); } proxy.setSslEnabled(sslEnabled); @@ -504,8 +498,9 @@ public void startProxyForHA(VirtualMachine vm, Map zoneHostInfoMap, long dataCenterId) { - List hosts = hostDao.listByDataCenterId(dataCenterId); + public boolean isZoneReady(Map zoneHostInfoMap, DataCenter dataCenter) { + List hosts = hostDao.listByDataCenterId(dataCenter.getId()); if (CollectionUtils.isEmpty(hosts)) { if (logger.isDebugEnabled()) { - logger.debug("Zone " + dataCenterId + " has no host available which is enabled and in Up state"); + logger.debug("Zone {} has no host available which is enabled and in Up state", dataCenter); } return false; } - 
ZoneHostInfo zoneHostInfo = zoneHostInfoMap.get(dataCenterId); + ZoneHostInfo zoneHostInfo = zoneHostInfoMap.get(dataCenter.getId()); if (zoneHostInfo != null && isZoneHostReady(zoneHostInfo)) { - VMTemplateVO template = vmTemplateDao.findSystemVMReadyTemplate(dataCenterId, HypervisorType.Any); + VMTemplateVO template = vmTemplateDao.findSystemVMReadyTemplate(dataCenter.getId(), HypervisorType.Any); if (template == null) { if (logger.isDebugEnabled()) { - logger.debug("System vm template is not ready at data center " + dataCenterId + ", wait until it is ready to launch console proxy vm"); + logger.debug("System vm template is not ready at data center {}, wait until it is ready to launch console proxy vm", dataCenter); } return false; } @@ -893,12 +889,12 @@ public boolean isZoneReady(Map zoneHostInfoMap, long dataCen if (template.isDirectDownload()) { templateHostRef = templateDataStoreDao.findByTemplate(template.getId(), DataStoreRole.Image); } else { - templateHostRef = templateDataStoreDao.findByTemplateZoneDownloadStatus(template.getId(), dataCenterId, Status.DOWNLOADED); + templateHostRef = templateDataStoreDao.findByTemplateZoneDownloadStatus(template.getId(), dataCenter.getId(), Status.DOWNLOADED); } if (templateHostRef != null) { - Boolean useLocalStorage = BooleanUtils.toBoolean(ConfigurationManagerImpl.SystemVMUseLocalStorage.valueIn(dataCenterId)); - List> l = consoleProxyDao.getDatacenterStoragePoolHostInfo(dataCenterId, useLocalStorage); + Boolean useLocalStorage = BooleanUtils.toBoolean(ConfigurationManagerImpl.SystemVMUseLocalStorage.valueIn(dataCenter.getId())); + List> l = consoleProxyDao.getDatacenterStoragePoolHostInfo(dataCenter.getId(), useLocalStorage); if (CollectionUtils.isNotEmpty(l) && l.get(0).second() > 0) { return true; } else { @@ -908,7 +904,7 @@ public boolean isZoneReady(Map zoneHostInfoMap, long dataCen } } else { if (logger.isDebugEnabled()) { - logger.debug(String.format("Zone [%s] is ready, but console proxy template [%s] is not 
ready on secondary storage.", dataCenterId, template.getId())); + logger.debug("Zone [{}] is ready, but console proxy template [{}] is not ready on secondary storage.", dataCenter, template); } } } @@ -1101,7 +1097,7 @@ public boolean destroyProxy(long vmId) { consoleProxyDao.remove(vmId); HostVO host = hostDao.findByTypeNameAndZoneId(proxy.getDataCenterId(), proxy.getHostName(), Host.Type.ConsoleProxy); if (host != null) { - logger.debug(String.format("Removing host [%s] entry for proxy [%s].", host.toString(), vmId)); + logger.debug("Removing host [{}] entry for proxy [{}].", host, proxy); return hostDao.remove(host.getId()); } @@ -1467,7 +1463,7 @@ private void scanManagementState() { private void handleResetSuspending() { List runningProxies = consoleProxyDao.getProxyListInStates(State.Running); for (ConsoleProxyVO proxy : runningProxies) { - logger.info("Stop console proxy " + proxy.getId() + " because of we are currently in ResetSuspending management mode"); + logger.info("Stop console proxy {} because of we are currently in ResetSuspending management mode", proxy); stopProxy(proxy.getId()); } @@ -1509,9 +1505,10 @@ public Long[] getScannablePools() { @Override public boolean isPoolReadyForScan(Long dataCenterId) { - if (!isZoneReady(zoneHostInfoMap, dataCenterId)) { + DataCenterVO zone = dataCenterDao.findById(dataCenterId); + if (!isZoneReady(zoneHostInfoMap, zone)) { if (logger.isDebugEnabled()) { - logger.debug("Zone " + dataCenterId + " is not ready to launch console proxy yet"); + logger.debug("Zone {} is not ready to launch console proxy yet", zone); } return false; } @@ -1519,14 +1516,14 @@ public boolean isPoolReadyForScan(Long dataCenterId) { List l = consoleProxyDao.getProxyListInStates(VirtualMachine.State.Starting, VirtualMachine.State.Stopping); if (l.size() > 0) { if (logger.isDebugEnabled()) { - logger.debug("Zone " + dataCenterId + " has " + l.size() + " console proxy VM(s) in transition state"); + logger.debug("Zone {} has {} console proxy 
VM(s) in transition state", zone, l.size()); } return false; } if (logger.isDebugEnabled()) { - logger.debug("Zone " + dataCenterId + " is ready to launch console proxy"); + logger.debug("Zone {} is ready to launch console proxy", zone); } return true; } diff --git a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyService.java b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyService.java index 4e0d14d88355..55b366bfae6c 100644 --- a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyService.java +++ b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyService.java @@ -17,9 +17,10 @@ package com.cloud.consoleproxy; import com.cloud.info.ConsoleProxyInfo; +import com.cloud.vm.VMInstanceVO; public interface ConsoleProxyService { - public abstract ConsoleProxyInfo assignProxy(long dataCenterId, long userVmId); + public abstract ConsoleProxyInfo assignProxy(long dataCenterId, VMInstanceVO userVm); } diff --git a/server/src/main/java/com/cloud/consoleproxy/StaticConsoleProxyManager.java b/server/src/main/java/com/cloud/consoleproxy/StaticConsoleProxyManager.java index 29a7497fc174..558bff3fdc87 100644 --- a/server/src/main/java/com/cloud/consoleproxy/StaticConsoleProxyManager.java +++ b/server/src/main/java/com/cloud/consoleproxy/StaticConsoleProxyManager.java @@ -60,7 +60,7 @@ protected HostVO findHost(VMInstanceVO vm) { } @Override - public ConsoleProxyInfo assignProxy(long dataCenterId, long userVmId) { + public ConsoleProxyInfo assignProxy(long dataCenterId, VMInstanceVO userVm) { return new ConsoleProxyInfo(_sslEnabled, _ip, _consoleProxyPort, _consoleProxyUrlPort, _consoleProxyUrlDomain); } diff --git a/server/src/main/java/com/cloud/dc/DedicatedResourceVO.java b/server/src/main/java/com/cloud/dc/DedicatedResourceVO.java index 79c563ced6eb..bdd66fd5b233 100644 --- a/server/src/main/java/com/cloud/dc/DedicatedResourceVO.java +++ b/server/src/main/java/com/cloud/dc/DedicatedResourceVO.java @@ -26,6 +26,7 @@ import javax.persistence.Table; 
import com.cloud.utils.NumbersUtil; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "dedicated_resources") @@ -170,4 +171,11 @@ public boolean equals(Object obj) { public int hashCode() { return NumbersUtil.hash(id); } + + @Override + public String toString() { + return String.format("DedicatedResourceVO %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid")); + } } diff --git a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java index 2605e46c731f..a3c889cd0707 100644 --- a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -584,7 +584,7 @@ private boolean canUseLastHost(HostVO host, ExcludeList avoids, DeploymentPlan p ServiceOfferingDetailsVO offeringDetails = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.vgpuType.toString()); ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.pciDevice.toString()); - if (offeringDetails != null && !_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())) { + if (offeringDetails != null && !_resourceMgr.isGPUDeviceAvailable(host, groupName.getValue(), offeringDetails.getValue())) { logger.debug("Cannot deploy VM [{}] in the last host [{}] because this host does not have the required GPU devices available. 
Skipping this and trying other available hosts.", vm, host); return false; diff --git a/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java b/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java index 46e6c369c331..abaf48400e23 100644 --- a/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java +++ b/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java @@ -133,7 +133,7 @@ public List orderClusters(VirtualMachineProfile vmProfile, DeploymentPlan //check if datacenter is in avoid set if (avoid.shouldAvoid(dc)) { if (logger.isDebugEnabled()) { - logger.debug("DataCenter id = '" + dc.getId() + "' provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning."); + logger.debug("DataCenter {} provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning.", dc); } return null; } @@ -141,8 +141,8 @@ public List orderClusters(VirtualMachineProfile vmProfile, DeploymentPlan List clusterList = new ArrayList<>(); if (plan.getClusterId() != null) { Long clusterIdSpecified = plan.getClusterId(); - logger.debug("Searching resources only under specified Cluster: " + clusterIdSpecified); ClusterVO cluster = clusterDao.findById(plan.getClusterId()); + logger.debug("Searching resources only under specified Cluster: {}", cluster != null ? cluster : clusterIdSpecified); if (cluster != null) { if (avoid.shouldAvoid(cluster)) { logger.debug("The specified cluster is in avoid set, returning."); @@ -158,9 +158,9 @@ public List orderClusters(VirtualMachineProfile vmProfile, DeploymentPlan } else if (plan.getPodId() != null) { //consider clusters under this pod only Long podIdSpecified = plan.getPodId(); - logger.debug("Searching resources only under specified Pod: " + podIdSpecified); HostPodVO pod = podDao.findById(podIdSpecified); + logger.debug("Searching resources only under specified Pod: {}", pod != null ? 
pod : podIdSpecified); if (pod != null) { if (avoid.shouldAvoid(pod)) { logger.debug("The specified pod is in avoid set, returning."); @@ -176,7 +176,7 @@ public List orderClusters(VirtualMachineProfile vmProfile, DeploymentPlan return null; } } else { - logger.debug("Searching all possible resources under this Zone: " + plan.getDataCenterId()); + logger.debug("Searching all possible resources under this Zone: {}", dcDao.findById(plan.getDataCenterId())); boolean applyAllocationAtPods = Boolean.parseBoolean(configDao.getValue(Config.ApplyAllocationAlgorithmToPods.key())); if (applyAllocationAtPods) { diff --git a/server/src/main/java/com/cloud/ha/AbstractInvestigatorImpl.java b/server/src/main/java/com/cloud/ha/AbstractInvestigatorImpl.java index b65865e732bf..00179103f931 100644 --- a/server/src/main/java/com/cloud/ha/AbstractInvestigatorImpl.java +++ b/server/src/main/java/com/cloud/ha/AbstractInvestigatorImpl.java @@ -64,23 +64,22 @@ public boolean stop() { } // Host.status is up and Host.type is routing - protected List findHostByPod(long podId, Long excludeHostId) { + protected List findHostByPod(long podId, Long excludeHostId) { QueryBuilder sc = QueryBuilder.create(HostVO.class); sc.and(sc.entity().getType(), Op.EQ, Type.Routing); sc.and(sc.entity().getPodId(), Op.EQ, podId); sc.and(sc.entity().getStatus(), Op.EQ, Status.Up); List hosts = sc.list(); - List hostIds = new ArrayList(hosts.size()); + List hostList = new ArrayList<>(hosts.size()); for (HostVO h : hosts) { - hostIds.add(h.getId()); - } - - if (excludeHostId != null) { - hostIds.remove(excludeHostId); + if (excludeHostId != null && h.getId() == excludeHostId) { + continue; + } + hostList.add(h); } - return hostIds; + return hostList; } // Method only returns Status.Up, Status.Down and Status.Unknown diff --git a/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java b/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java index d8fc99a0934e..b297d5261625 100644 --- 
a/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java +++ b/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java @@ -236,7 +236,7 @@ public Status investigate(final long hostId) { hostState = investigator.isAgentAlive(host); if (hostState != null) { if (logger.isDebugEnabled()) { - logger.debug(investigator.getName() + " was able to determine host " + hostId + " is in " + hostState.toString()); + logger.debug("{} was able to determine host {} is in {}", investigator.getName(), host, hostState.toString()); } return hostState; } @@ -256,11 +256,11 @@ public void scheduleRestartForVmsOnHost(final HostVO host, boolean investigate) } if (host.getHypervisorType() == HypervisorType.VMware || host.getHypervisorType() == HypervisorType.Hyperv) { - logger.info("Don't restart VMs on host " + host.getId() + " as it is a " + host.getHypervisorType().toString() + " host"); + logger.info("Don't restart VMs on host {} as it is a {} host", host, host.getHypervisorType().toString()); return; } - logger.warn("Scheduling restart for VMs on host " + host.getId() + "-" + host.getName()); + logger.warn("Scheduling restart for VMs on host {}", host); final List vms = _instanceDao.listByHostId(host.getId()); final DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId()); @@ -300,13 +300,12 @@ public void scheduleRestartForVmsOnHost(final HostVO host, boolean investigate) continue; } if (logger.isDebugEnabled()) { - logger.debug("Notifying HA Mgr of to restart vm " + vm.getId() + "-" + vm.getInstanceName()); + logger.debug("Notifying HA Mgr of to restart vm {}", vm); } vm = _instanceDao.findByUuid(vm.getUuid()); Long hostId = vm.getHostId(); if (hostId != null && !hostId.equals(host.getId())) { - logger.debug("VM " + vm.getInstanceName() + " is not on down host " + host.getId() + " it is on other host " - + hostId + " VM HA is done"); + logger.debug("VM {} is not on down host {} it is on other host {} VM HA is done", vm, host, hostId); continue; } 
scheduleRestart(vm, investigate); @@ -342,7 +341,7 @@ public boolean scheduleMigration(final VMInstanceVO vm) { if (vm.getHostId() != null) { final HaWorkVO work = new HaWorkVO(vm.getId(), vm.getType(), WorkType.Migration, Step.Scheduled, vm.getHostId(), vm.getState(), 0, vm.getUpdated()); _haDao.persist(work); - logger.info("Scheduled migration work of VM " + vm.getUuid() + " from host " + _hostDao.findById(vm.getHostId()) + " with HAWork " + work); + logger.info("Scheduled migration work of VM {} from host {} with HAWork {}", vm, _hostDao.findById(vm.getHostId()), work); wakeupWorkers(); } return true; @@ -526,20 +525,20 @@ protected Long restart(final HaWorkVO work) { if (host == null) { host = _hostDao.findByIdIncludingRemoved(work.getHostId()); if (host != null) { - logger.debug("VM " + vm.toString() + " is now no longer on host " + work.getHostId() + " as the host is removed"); + logger.debug("VM {} is now no longer on host {} as the host is removed", vm, host); isHostRemoved = true; } } DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId()); HostPodVO podVO = _podDao.findById(host.getPodId()); - String hostDesc = "name: " + host.getName() + "(id:" + host.getId() + "), availability zone: " + dcVO.getName() + ", pod: " + podVO.getName(); + String hostDesc = String.format("%s, availability zone: %s, pod: %s", host, dcVO.getName(), podVO.getName()); Boolean alive = null; if (work.getStep() == Step.Investigating) { if (!isHostRemoved) { if (vm.getHostId() == null || vm.getHostId() != work.getHostId()) { - logger.info("VM " + vm.toString() + " is now no longer on host " + work.getHostId()); + logger.info("VM {} is now no longer on host {}", vm, host); return null; } @@ -571,7 +570,7 @@ protected Long restart(final HaWorkVO work) { } else if (!alive) { fenced = true; } else { - logger.debug("VM " + vm.getInstanceName() + " is found to be alive by " + investigator.getName()); + logger.debug("VM {} is found to be alive by {}", vm, investigator.getName()); if 
(host.getStatus() == Status.Up) { logger.info(vm + " is alive and host is up. No need to restart it."); return null; @@ -666,7 +665,7 @@ protected Long restart(final HaWorkVO work) { // First try starting the vm with its original planner, if it doesn't succeed send HAPlanner as its an emergency. startVm(vm, params, null); } catch (InsufficientCapacityException e){ - logger.warn("Failed to deploy vm " + vmId + " with original planner, sending HAPlanner"); + logger.warn("Failed to deploy vm {} with original planner, sending HAPlanner", vm); startVm(vm, params, _haPlanners.get(0)); } @@ -685,19 +684,19 @@ protected Long restart(final HaWorkVO work) { } catch (final InsufficientCapacityException e) { logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); _alertMgr.sendAlert(alertType, vm.getDataCenterId(), vm.getPodIdToDeployIn(), "Unable to restart " + vm.getHostName() + " which was running on host " + - hostDesc, "Insufficient capacity to restart VM, name: " + vm.getHostName() + ", id: " + vmId + " which was running on host " + hostDesc); + hostDesc, String.format("Insufficient capacity to restart VM, name: %s, id: %d uuid: %s which was running on host %s", vm.getHostName(), vmId, vm.getUuid(), hostDesc)); } catch (final ResourceUnavailableException e) { logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); _alertMgr.sendAlert(alertType, vm.getDataCenterId(), vm.getPodIdToDeployIn(), "Unable to restart " + vm.getHostName() + " which was running on host " + - hostDesc, "The resource is unavailable for trying to restart VM, name: " + vm.getHostName() + ", id: " + vmId + " which was running on host " + hostDesc); + hostDesc, String.format("The resource is unavailable for trying to restart VM, name: %s, id: %d uuid: %s which was running on host %s", vm.getHostName(), vmId, vm.getUuid(), hostDesc)); } catch (ConcurrentOperationException e) { logger.warn("Unable to restart " + vm.toString() + " due to " + 
e.getMessage()); _alertMgr.sendAlert(alertType, vm.getDataCenterId(), vm.getPodIdToDeployIn(), "Unable to restart " + vm.getHostName() + " which was running on host " + - hostDesc, "The Storage is unavailable for trying to restart VM, name: " + vm.getHostName() + ", id: " + vmId + " which was running on host " + hostDesc); + hostDesc, String.format("The Storage is unavailable for trying to restart VM, name: %s, id: %d uuid: %s which was running on host %s", vm.getHostName(), vmId, vm.getUuid(), hostDesc)); } catch (OperationTimedoutException e) { logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); _alertMgr.sendAlert(alertType, vm.getDataCenterId(), vm.getPodIdToDeployIn(), "Unable to restart " + vm.getHostName() + " which was running on host " + - hostDesc, "The operation timed out while trying to restart VM, name: " + vm.getHostName() + ", id: " + vmId + " which was running on host " + hostDesc); + hostDesc, String.format("The operation timed out while trying to restart VM, name: %s, id: %d uuid: %s which was running on host %s", vm.getHostName(), vmId, vm.getUuid(), hostDesc)); } vm = _itMgr.findById(vm.getId()); work.setUpdateTime(vm.getUpdated()); @@ -708,14 +707,14 @@ protected Long restart(final HaWorkVO work) { public Long migrate(final HaWorkVO work) { long vmId = work.getInstanceId(); long srcHostId = work.getHostId(); + HostVO srcHost = _hostDao.findById(srcHostId); VMInstanceVO vm = _instanceDao.findById(vmId); if (vm == null) { logger.info("Unable to find vm: " + vmId + ", skipping migrate."); return null; } - logger.info("Migration attempt: for VM " + vm.getUuid() + "from host id " + srcHostId + - ". Starting attempt: " + (1 + work.getTimesTried()) + "/" + _maxRetries + " times."); + logger.info("Migration attempt: for VM {}from host {}. 
Starting attempt: {}/{} times.", vm, srcHost, 1 + work.getTimesTried(), _maxRetries); try { work.setStep(Step.Migrating); _haDao.update(work.getId(), work); @@ -724,14 +723,11 @@ public Long migrate(final HaWorkVO work) { _itMgr.migrateAway(vm.getUuid(), srcHostId); return null; } catch (InsufficientServerCapacityException e) { - logger.warn("Migration attempt: Insufficient capacity for migrating a VM " + - vm.getUuid() + " from source host id " + srcHostId + - ". Exception: " + e.getMessage()); + logger.warn("Migration attempt: Insufficient capacity for migrating a VM {} from source host {}. Exception: {}", vm, srcHost, e.getMessage()); _resourceMgr.migrateAwayFailed(srcHostId, vmId); return (System.currentTimeMillis() >> 10) + _migrateRetryInterval; } catch (Exception e) { - logger.warn("Migration attempt: Unexpected exception occurred when attempting migration of " + - vm.getUuid() + e.getMessage()); + logger.warn("Migration attempt: Unexpected exception occurred when attempting migration of {} {}", vm, e.getMessage()); throw e; } } @@ -777,7 +773,7 @@ protected Long destroyVM(final HaWorkVO work) { boolean expunge = VirtualMachine.Type.SecondaryStorageVm.equals(vm.getType()) || VirtualMachine.Type.ConsoleProxy.equals(vm.getType()); if (!expunge && VirtualMachine.State.Destroyed.equals(work.getPreviousState())) { - logger.info("VM " + vm.getUuid() + " already in " + vm.getState() + " state. Throwing away " + work); + logger.info("VM {} already in {} state. 
Throwing away {}", vm, vm.getState(), work); return null; } try { @@ -786,7 +782,7 @@ protected Long destroyVM(final HaWorkVO work) { destroyVM(vm, expunge); return null; } else { - logger.info("VM " + vm.getUuid() + " still in " + vm.getState() + " state."); + logger.info("VM {} still in {} state.", vm, vm.getState()); } } catch (final AgentUnavailableException e) { logger.debug("Agent is not available" + e.getMessage()); @@ -817,8 +813,9 @@ protected Long stopVM(final HaWorkVO work) throws ConcurrentOperationException { } else if (work.getWorkType() == WorkType.CheckStop) { if ((vm.getState() != work.getPreviousState()) || vm.getUpdated() != work.getUpdateTime() || vm.getHostId() == null || vm.getHostId().longValue() != work.getHostId()) { - logger.info(vm + " is different now. Scheduled Host: " + work.getHostId() + " Current Host: " + - (vm.getHostId() != null ? vm.getHostId() : "none") + " State: " + vm.getState()); + HostVO scheduledHost = _hostDao.findById(work.getHostId()); + HostVO currentHost = vm.getHostId() != null ? _hostDao.findById(vm.getHostId()) : null; + logger.info("{} is different now. Scheduled Host: {} Current Host: {} State: {}", vm, scheduledHost, currentHost != null ? currentHost : "none", vm.getState()); return null; } @@ -828,8 +825,9 @@ protected Long stopVM(final HaWorkVO work) throws ConcurrentOperationException { } else if (work.getWorkType() == WorkType.ForceStop) { if ((vm.getState() != work.getPreviousState()) || vm.getUpdated() != work.getUpdateTime() || vm.getHostId() == null || vm.getHostId().longValue() != work.getHostId()) { - logger.info(vm + " is different now. Scheduled Host: " + work.getHostId() + " Current Host: " + - (vm.getHostId() != null ? vm.getHostId() : "none") + " State: " + vm.getState()); + HostVO scheduledHost = _hostDao.findById(work.getHostId()); + HostVO currentHost = vm.getHostId() != null ? _hostDao.findById(vm.getHostId()) : null; + logger.info("{} is different now. 
Scheduled Host: {} Current Host: {} State: {}", vm, scheduledHost, currentHost != null ? currentHost : "none", vm.getState()); return null; } @@ -851,7 +849,7 @@ protected Long stopVM(final HaWorkVO work) throws ConcurrentOperationException { @Override public void cancelScheduledMigrations(final HostVO host) { WorkType type = host.getType() == HostVO.Type.Storage ? WorkType.Stop : WorkType.Migration; - logger.info("Canceling all scheduled migrations from host " + host.getUuid()); + logger.info("Canceling all scheduled migrations from host {}", host); _haDao.deleteMigrationWorkItems(host.getId(), type, _serverId); } diff --git a/server/src/main/java/com/cloud/ha/ManagementIPSystemVMInvestigator.java b/server/src/main/java/com/cloud/ha/ManagementIPSystemVMInvestigator.java index ce45d662082e..0972f2451afb 100644 --- a/server/src/main/java/com/cloud/ha/ManagementIPSystemVMInvestigator.java +++ b/server/src/main/java/com/cloud/ha/ManagementIPSystemVMInvestigator.java @@ -71,9 +71,9 @@ public boolean isVmAlive(VirtualMachine vm, Host host) throws UnknownVM { continue; } // get the data center IP address, find a host on the pod, use that host to ping the data center IP address - List otherHosts = findHostByPod(vmHost.getPodId(), vm.getHostId()); - for (Long otherHost : otherHosts) { - Status vmState = testIpAddress(otherHost, nic.getIPv4Address()); + List otherHosts = findHostByPod(vmHost.getPodId(), vm.getHostId()); + for (HostVO otherHost : otherHosts) { + Status vmState = testIpAddress(otherHost.getId(), nic.getIPv4Address()); assert vmState != null; // In case of Status.Unknown, next host will be tried if (vmState == Status.Up) { @@ -84,7 +84,7 @@ public boolean isVmAlive(VirtualMachine vm, Host host) throws UnknownVM { } else if (vmState == Status.Down) { // We can't ping the VM directly...if we can ping the host, then report the VM down. // If we can't ping the host, then we don't have enough information. 
- Status vmHostState = testIpAddress(otherHost, vmHost.getPrivateIpAddress()); + Status vmHostState = testIpAddress(otherHost.getId(), vmHost.getPrivateIpAddress()); assert vmHostState != null; if (vmHostState == Status.Up) { if (logger.isDebugEnabled()) { diff --git a/server/src/main/java/com/cloud/ha/UserVmDomRInvestigator.java b/server/src/main/java/com/cloud/ha/UserVmDomRInvestigator.java index 90d34799d3d8..7d063b3088e4 100644 --- a/server/src/main/java/com/cloud/ha/UserVmDomRInvestigator.java +++ b/server/src/main/java/com/cloud/ha/UserVmDomRInvestigator.java @@ -18,6 +18,7 @@ import java.util.ArrayList; import java.util.List; +import java.util.stream.Collectors; import javax.inject.Inject; @@ -26,6 +27,7 @@ import com.cloud.agent.api.Answer; import com.cloud.agent.api.PingTestCommand; import com.cloud.host.Host; +import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.network.NetworkModel; @@ -73,7 +75,8 @@ public boolean isVmAlive(VirtualMachine vm, Host host) throws UnknownVM { List routers = _vnaMgr.getRoutersForNetwork(nic.getNetworkId()); if (routers == null || routers.isEmpty()) { if (logger.isDebugEnabled()) { - logger.debug("Unable to find a router in network " + nic.getNetworkId() + " to ping " + vm); + logger.debug("Unable to find a router in network {} to ping {}", + _networkMgr.getNetwork(nic.getNetworkId()), vm); } continue; } @@ -102,26 +105,26 @@ public boolean isVmAlive(VirtualMachine vm, Host host) throws UnknownVM { @Override public Status isAgentAlive(Host agent) { if (logger.isDebugEnabled()) { - logger.debug("checking if agent (" + agent.getId() + ") is alive"); + logger.debug("checking if agent ({}) is alive", agent); } if (agent.getPodId() == null) { return null; } - List otherHosts = findHostByPod(agent.getPodId(), agent.getId()); + List otherHosts = findHostByPod(agent.getPodId(), agent.getId()); - for (Long hostId : otherHosts) { + for (HostVO host : 
otherHosts) { if (logger.isDebugEnabled()) { - logger.debug("sending ping from (" + hostId + ") to agent's host ip address (" + agent.getPrivateIpAddress() + ")"); + logger.debug("sending ping from ({}) to agent's host ip address ({})", host, agent.getPrivateIpAddress()); } - Status hostState = testIpAddress(hostId, agent.getPrivateIpAddress()); + Status hostState = testIpAddress(host.getId(), agent.getPrivateIpAddress()); assert hostState != null; // In case of Status.Unknown, next host will be tried if (hostState == Status.Up) { if (logger.isDebugEnabled()) { - logger.debug("ping from (" + hostId + ") to agent's host ip address (" + agent.getPrivateIpAddress() + - ") successful, returning that agent is disconnected"); + logger.debug("ping from ({}) to agent's host ip address ({}) successful, returning that agent is disconnected", + host, agent.getPrivateIpAddress()); } return Status.Disconnected; // the computing host ip is ping-able, but the computing agent is down, report that the agent is disconnected } else if (hostState == Status.Down) { @@ -157,15 +160,17 @@ private Boolean testUserVM(VirtualMachine vm, Nic nic, VirtualRouter router) { if (vm.getHypervisorType() == HypervisorType.XenServer || vm.getHypervisorType() == HypervisorType.KVM) { otherHosts.add(router.getHostId()); } else { - otherHosts = findHostByPod(router.getPodIdToDeployIn(), null); + List otherHostsList = findHostByPod(router.getPodIdToDeployIn(), null); + otherHosts = otherHostsList.stream().map(HostVO::getId).collect(Collectors.toList()); } for (Long hostId : otherHosts) { try { Answer pingTestAnswer = _agentMgr.easySend(hostId, new PingTestCommand(routerPrivateIp, privateIp)); if (pingTestAnswer != null && pingTestAnswer.getResult()) { if (logger.isDebugEnabled()) { - logger.debug("user vm's " + vm.getHostName() + " ip address " + privateIp + " has been successfully pinged from the Virtual Router " + - router.getHostName() + ", returning that vm is alive"); + logger.debug("user vm's {} ip 
address {} has been successfully " + + "pinged from the Virtual Router {}, returning that vm is alive", + vm, privateIp, router); } return Boolean.TRUE; } diff --git a/server/src/main/java/com/cloud/hypervisor/CloudZonesStartupProcessor.java b/server/src/main/java/com/cloud/hypervisor/CloudZonesStartupProcessor.java index 961e11e91d74..f2704f35fcdd 100644 --- a/server/src/main/java/com/cloud/hypervisor/CloudZonesStartupProcessor.java +++ b/server/src/main/java/com/cloud/hypervisor/CloudZonesStartupProcessor.java @@ -131,13 +131,13 @@ protected void updateSecondaryHost(final HostVO host, final StartupStorageComman } } if (logger.isDebugEnabled()) { - logger.debug("Successfully loaded the DataCenter from the zone token passed in "); + logger.debug("Successfully loaded the DataCenter {} from the zone token passed in ", zone); } HostPodVO pod = findPod(startup, zone.getId(), Host.Type.Routing); //yes, routing Long podId = null; if (pod != null) { - logger.debug("Found pod " + pod.getName() + " for the secondary storage host " + startup.getName()); + logger.debug("Found pod {} for the secondary storage host {}", pod, startup.getName()); podId = pod.getId(); } host.setDataCenterId(zone.getId()); diff --git a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java index a5b2a3b75a5e..c510502f5f9c 100644 --- a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java +++ b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java @@ -206,7 +206,7 @@ public NicTO toNicTO(NicProfile profile) { } to.setNicSecIps(secIps); } else { - logger.warn("Unabled to load NicVO for NicProfile " + profile.getId()); + logger.warn("Unabled to load NicVO for NicProfile {}", profile); //Workaround for dynamically created nics //FixMe: uuid and secondary IPs can be made part of nic profile to.setUuid(UUID.randomUUID().toString()); diff --git a/server/src/main/java/com/cloud/hypervisor/KVMGuru.java 
b/server/src/main/java/com/cloud/hypervisor/KVMGuru.java index c27adc59fde0..98cd6c8c3fa5 100644 --- a/server/src/main/java/com/cloud/hypervisor/KVMGuru.java +++ b/server/src/main/java/com/cloud/hypervisor/KVMGuru.java @@ -134,21 +134,22 @@ protected void setVmQuotaPercentage(VirtualMachineTO to, VirtualMachineProfile v if (host == null) { throw new CloudRuntimeException("Host with id: " + vm.getHostId() + " not found"); } - logger.debug("Limiting CPU usage for VM: " + vm.getUuid() + " on host: " + host.getUuid()); + logger.debug("Limiting CPU usage for VM: {} on host: {}", vm, host); double hostMaxSpeed = getHostCPUSpeed(host); double maxSpeed = getVmSpeed(to); try { BigDecimal percent = new BigDecimal(maxSpeed / hostMaxSpeed); percent = percent.setScale(2, RoundingMode.HALF_DOWN); if (percent.compareTo(new BigDecimal(1)) == 1) { - logger.debug("VM " + vm.getUuid() + " CPU MHz exceeded host " + host.getUuid() + " CPU MHz, limiting VM CPU to the host maximum"); + logger.debug("VM {} CPU MHz exceeded host {} CPU MHz, limiting VM CPU to the host maximum", vm, host); percent = new BigDecimal(1); } to.setCpuQuotaPercentage(percent.doubleValue()); - logger.debug("Host: " + host.getUuid() + " max CPU speed = " + hostMaxSpeed + "MHz, VM: " + vm.getUuid() + - "max CPU speed = " + maxSpeed + "MHz. Setting CPU quota percentage as: " + percent.doubleValue()); + logger.debug("Host: {} max CPU speed = {} MHz, VM: {} max CPU speed = {} MHz. " + + "Setting CPU quota percentage as: {}", + host, hostMaxSpeed, vm, maxSpeed, percent.doubleValue()); } catch (NumberFormatException e) { - logger.error("Error calculating VM: " + vm.getUuid() + " quota percentage, it wll not be set. Error: " + e.getMessage(), e); + logger.error("Error calculating VM: {} quota percentage, it wll not be set. 
Error: {}", vm, e.getMessage(), e); } } } @@ -241,9 +242,11 @@ protected Pair getHostMaxMemoryAndCpuCores(HostVO host, VirtualMa } Long lastHostId = virtualMachine.getLastHostId(); - logger.info(String.format("%s is not running; therefore, we use the last host [%s] that the VM was running on to derive the unconstrained service offering max CPU and memory.", vmDescription, lastHostId)); HostVO lastHost = lastHostId == null ? null : hostDao.findById(lastHostId); + logger.info("{} is not running; therefore, we use the last host [{}] with id {} that the " + + "VM was running on to derive the unconstrained service offering max CPU " + + "and memory.", vmDescription, lastHost, lastHostId); if (lastHost != null) { maxHostMemory = lastHost.getTotalMemory(); maxHostCpuCore = lastHost.getCpus(); diff --git a/server/src/main/java/com/cloud/hypervisor/kvm/dpdk/DpdkHelperImpl.java b/server/src/main/java/com/cloud/hypervisor/kvm/dpdk/DpdkHelperImpl.java index 361b1302b287..a4afdb394c16 100644 --- a/server/src/main/java/com/cloud/hypervisor/kvm/dpdk/DpdkHelperImpl.java +++ b/server/src/main/java/com/cloud/hypervisor/kvm/dpdk/DpdkHelperImpl.java @@ -75,10 +75,9 @@ public void setDpdkVhostUserMode(VirtualMachineTO to, VirtualMachineProfile vm) VHostUserMode dpdKvHostUserMode = VHostUserMode.fromValue(mode); to.addExtraConfig(DPDK_VHOST_USER_MODE, dpdKvHostUserMode.toString()); } catch (IllegalArgumentException e) { - logger.error(String.format("DPDK vHost User mode found as a detail for service offering: %s " + - "but value: %s is not supported. Supported values: %s, %s", - offering.getId(), mode, - VHostUserMode.CLIENT.toString(), VHostUserMode.SERVER.toString())); + logger.error("DPDK vHost User mode found as a detail for service offering: {} " + + "but value: {} is not supported. 
Supported values: {}, {}", + offering, mode, VHostUserMode.CLIENT.toString(), VHostUserMode.SERVER.toString()); } } } diff --git a/server/src/main/java/com/cloud/network/ExternalDeviceUsageManagerImpl.java b/server/src/main/java/com/cloud/network/ExternalDeviceUsageManagerImpl.java index 329e4b903792..bffc35f72dfa 100644 --- a/server/src/main/java/com/cloud/network/ExternalDeviceUsageManagerImpl.java +++ b/server/src/main/java/com/cloud/network/ExternalDeviceUsageManagerImpl.java @@ -240,7 +240,7 @@ public void updateExternalLoadBalancerNetworkUsageStats(long loadBalancerRuleId) lbAnswer = (ExternalNetworkResourceUsageAnswer)_agentMgr.easySend(externalLoadBalancer.getId(), cmd); if (lbAnswer == null || !lbAnswer.getResult()) { String details = (lbAnswer != null) ? lbAnswer.getDetails() : "details unavailable"; - String msg = "Unable to get external load balancer stats for network" + networkId + " due to: " + details + "."; + String msg = String.format("Unable to get external load balancer stats for network %s due to: %s.", network, details); logger.error(msg); return; } @@ -413,7 +413,7 @@ protected void runExternalDeviceNetworkUsageTask() { for (NetworkVO network : networksForAccount) { if (!_networkModel.networkIsConfiguredForExternalNetworking(zoneId, network.getId())) { - logger.debug("Network " + network.getId() + " is not configured for external networking, so skipping usage check."); + logger.debug("Network {} is not configured for external networking, so skipping usage check.", network); continue; } @@ -456,7 +456,7 @@ protected void runExternalDeviceNetworkUsageTask() { } } else { if (logger.isTraceEnabled()) { - logger.trace("Reusing usage Answer for device id " + fwDeviceId + "for Network " + network.getId()); + logger.trace("Reusing usage Answer for device id {} for Network {}", fwDeviceId, network); } firewallAnswer = fwDeviceUsageAnswerMap.get(fwDeviceId); } @@ -491,7 +491,7 @@ protected void runExternalDeviceNetworkUsageTask() { } } else { if 
(logger.isTraceEnabled()) { - logger.trace("Reusing usage Answer for device id " + lbDeviceId + "for Network " + network.getId()); + logger.trace("Reusing usage Answer for device id {} for Network {}", lbDeviceId, network); } lbAnswer = lbDeviceUsageAnswerMap.get(lbDeviceId); } @@ -598,7 +598,7 @@ private boolean updateStatsEntry(long accountId, long zoneId, long networkId, St } else { URI broadcastURI = network.getBroadcastUri(); if (broadcastURI == null) { - logger.debug("Not updating stats for guest network with ID " + network.getId() + " because the network is not implemented."); + logger.debug("Not updating stats for guest network {} because the network is not implemented.", network); return true; } else { long vlanTag = Integer.parseInt(BroadcastDomainType.getValue(broadcastURI)); diff --git a/server/src/main/java/com/cloud/network/ExternalFirewallDeviceManagerImpl.java b/server/src/main/java/com/cloud/network/ExternalFirewallDeviceManagerImpl.java index 924a3b75dada..ca108749f012 100644 --- a/server/src/main/java/com/cloud/network/ExternalFirewallDeviceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/ExternalFirewallDeviceManagerImpl.java @@ -206,11 +206,9 @@ public ExternalFirewallDeviceVO addExternalFirewall(long physicalNetworkId, Stri final PhysicalNetworkServiceProviderVO ntwkSvcProvider = _physicalNetworkServiceProviderDao.findByServiceProvider(pNetwork.getId(), ntwkDevice.getNetworkServiceProvder()); if (ntwkSvcProvider == null) { - throw new CloudRuntimeException("Network Service Provider: " + ntwkDevice.getNetworkServiceProvder() + " is not enabled in the physical network: " + - physicalNetworkId + "to add this device"); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is not enabled in the physical network: %s to add this device", ntwkDevice.getNetworkServiceProvder(), pNetwork)); } else if (ntwkSvcProvider.getState() == PhysicalNetworkServiceProvider.State.Shutdown) { - throw new 
CloudRuntimeException("Network Service Provider: " + ntwkSvcProvider.getProviderName() + - " is not added or in shutdown state in the physical network: " + physicalNetworkId + "to add this device"); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is not added or in shutdown state in the physical network: %s to add this device", ntwkSvcProvider.getProviderName(), pNetwork)); } URI uri; @@ -386,7 +384,7 @@ protected boolean freeFirewallForNetwork(Network network) { _networkExternalFirewallDao.remove(fwDeviceForNetwork.getId()); } } catch (Exception exception) { - logger.error("Failed to release firewall device for the network" + network.getId() + " due to " + exception.getMessage()); + logger.error("Failed to release firewall device for the network {} due to {}", network, exception.getMessage()); return false; } finally { deviceMapLock.unlock(); @@ -551,8 +549,7 @@ public boolean manageGuestNetworkWithExternalFirewall(boolean add, Network netwo } String action = add ? "implemented" : "shut down"; - logger.debug("External firewall has " + action + " the guest network for account " + account.getAccountName() + "(id = " + account.getAccountId() + - ") with VLAN tag " + guestVlanTag); + logger.debug("External firewall has {} the guest network for account {} with VLAN tag {}", action, account, guestVlanTag); return true; } @@ -572,8 +569,8 @@ public boolean applyFirewallRules(Network network, List assert (externalFirewall != null); if (network.getState() == Network.State.Allocated) { - logger.debug("External firewall was asked to apply firewall rules for network with ID " + network.getId() + - "; this network is not implemented. Skipping backend commands."); + logger.debug("External firewall was asked to apply firewall rules for network {}; " + + "this network is not implemented. 
Skipping backend commands.", network); return true; } @@ -615,8 +612,8 @@ public boolean applyStaticNatRules(Network network, List ru assert (externalFirewall != null); if (network.getState() == Network.State.Allocated) { - logger.debug("External firewall was asked to apply firewall rules for network with ID " + network.getId() + - "; this network is not implemented. Skipping backend commands."); + logger.debug("External firewall was asked to apply firewall rules for network {}; " + + "this network is not implemented. Skipping backend commands.", network); return true; } @@ -761,7 +758,7 @@ public int getVlanOffset(long physicalNetworkId, int vlanTag) { } if (pNetwork.getVnet() == null) { - throw new CloudRuntimeException("Could not find vlan range for physical Network " + physicalNetworkId + "."); + throw new CloudRuntimeException("Could not find vlan range for physical Network " + pNetwork + "."); } Integer lowestVlanTag = null; List> vnetList = pNetwork.getVnet(); @@ -820,8 +817,8 @@ public boolean applyPortForwardingRules(Network network, List l boolean externalLoadBalancerIsInline = _networkMgr.isNetworkInlineMode(network); if (network.getState() == Network.State.Allocated) { - logger.debug("External load balancer was asked to apply LB rules for network with ID " + network.getId() + - "; this network is not implemented. Skipping backend commands."); + logger.debug("External load balancer was asked to apply LB rules for network {}; this network is not implemented. 
Skipping backend commands.", network); return true; } @@ -1048,13 +1044,13 @@ public boolean manageGuestNetworkWithExternalLoadBalancer(boolean add, Network g // allocate a load balancer device for the network lbDeviceVO = allocateLoadBalancerForNetwork(guestConfig); if (lbDeviceVO == null) { - String msg = "failed to alloacate a external load balancer for the network " + guestConfig.getId(); + String msg = String.format("failed to allocate a external load balancer for the network %s", guestConfig); logger.error(msg); throw new InsufficientNetworkCapacityException(msg, DataCenter.class, guestConfig.getDataCenterId()); } } externalLoadBalancer = _hostDao.findById(lbDeviceVO.getHostId()); - logger.debug("Allocated external load balancer device:" + lbDeviceVO.getId() + " for the network: " + guestConfig.getId()); + logger.debug("Allocated external load balancer device: {} for the network: {}", lbDeviceVO, guestConfig); } else { // find the load balancer device allocated for the network ExternalLoadBalancerDeviceVO lbDeviceVO = getExternalLoadBalancerForNetwork(guestConfig); @@ -1128,7 +1124,7 @@ public boolean manageGuestNetworkWithExternalLoadBalancer(boolean add, Network g // release the load balancer allocated for the network boolean releasedLB = freeLoadBalancerForNetwork(guestConfig); if (!releasedLB) { - String msg = "Failed to release the external load balancer used for the network: " + guestConfig.getId(); + String msg = String.format("Failed to release the external load balancer used for the network: %s", guestConfig); logger.error(msg); } } @@ -1136,8 +1132,7 @@ public boolean manageGuestNetworkWithExternalLoadBalancer(boolean add, Network g if (logger.isDebugEnabled()) { Account account = _accountDao.findByIdIncludingRemoved(guestConfig.getAccountId()); String action = add ? 
"implemented" : "shut down"; - logger.debug("External load balancer has " + action + " the guest network for account " + account.getAccountName() + "(id = " + account.getAccountId() + - ") with VLAN tag " + guestVlanTag); + logger.debug("External load balancer has {} the guest network for account {} with VLAN tag {}", action, account, guestVlanTag); } return true; @@ -1194,20 +1189,20 @@ protected IpDeployer getIpDeployerForInlineMode(Network network) { List providers = _networkMgr.getProvidersForServiceInNetwork(network, Service.Firewall); //Only support one provider now if (providers == null) { - logger.error("Cannot find firewall provider for network " + network.getId()); + logger.error("Cannot find firewall provider for network {}", network); return null; } if (providers.size() != 1) { - logger.error("Found " + providers.size() + " firewall provider for network " + network.getId()); + logger.error("Found {} firewall provider for network {}", providers.size(), network); return null; } NetworkElement element = _networkModel.getElementImplementingProvider(providers.get(0).getName()); if (!(element instanceof IpDeployer)) { - logger.error("The firewall provider for network " + network.getName() + " don't have ability to deploy IP address!"); + logger.error("The firewall provider for network {} don't have ability to deploy IP address!", network); return null; } - logger.info("Let " + element.getName() + " handle ip association for " + getName() + " in network " + network.getId()); + logger.info("Let {} handle ip association for {} in network {}", element.getName(), getName(), network); return (IpDeployer)element; } @@ -1239,8 +1234,7 @@ public List getLBHealthChecks(Network network, List params) { private IpAddress allocateIP(Account ipOwner, boolean isSystem, long zoneId) throws InsufficientAddressCapacityException, ConcurrentOperationException { Account caller = CallContext.current().getCallingAccount(); - long callerUserId = 
CallContext.current().getCallingUserId(); + User callerUser = CallContext.current().getCallingUser(); // check permissions _accountMgr.checkAccess(caller, null, false, ipOwner); DataCenter zone = _entityMgr.findById(DataCenter.class, zoneId); - return allocateIp(ipOwner, isSystem, caller, callerUserId, zone, null, null); + return allocateIp(ipOwner, isSystem, caller, callerUser, zone, null, null); } // An IP association is required in below cases @@ -670,36 +670,37 @@ public boolean applyRules(List rules, FirewallRule.Purpo return success; } - protected boolean cleanupIpResources(long ipId, long userId, Account caller) { + protected boolean cleanupIpResources(IpAddress ip, long userId, Account caller) { boolean success = true; + long ipId = ip.getId(); // Revoke all firewall rules for the ip try { - logger.debug("Revoking all " + Purpose.Firewall + "rules as a part of public IP id=" + ipId + " release..."); - if (!_firewallMgr.revokeFirewallRulesForIp(ipId, userId, caller)) { - logger.warn("Unable to revoke all the firewall rules for ip id=" + ipId + " as a part of ip release"); + logger.debug("Revoking all {} rules as a part of public IP {} release...", Purpose.Firewall, ip); + if (!_firewallMgr.revokeFirewallRulesForIp(ip, userId, caller)) { + logger.warn("Unable to revoke all the firewall rules for ip {} as a part of ip release", ip); success = false; } } catch (ResourceUnavailableException e) { - logger.warn("Unable to revoke all firewall rules for ip id=" + ipId + " as a part of ip release", e); + logger.warn("Unable to revoke all firewall rules for ip {} as a part of ip release", ip, e); success = false; } // Revoke all PF/Static nat rules for the ip try { - logger.debug("Revoking all " + Purpose.PortForwarding + "/" + Purpose.StaticNat + " rules as a part of public IP id=" + ipId + " release..."); + logger.debug("Revoking all {}/{} rules as a part of public IP {} release...", Purpose.PortForwarding, Purpose.StaticNat, ip); if 
(!_rulesMgr.revokeAllPFAndStaticNatRulesForIp(ipId, userId, caller)) { - logger.warn("Unable to revoke all the port forwarding rules for ip id=" + ipId + " as a part of ip release"); + logger.warn("Unable to revoke all the port forwarding rules for ip {} as a part of ip release", ip); success = false; } } catch (ResourceUnavailableException e) { - logger.warn("Unable to revoke all the port forwarding rules for ip id=" + ipId + " as a part of ip release", e); + logger.warn("Unable to revoke all the port forwarding rules for ip {} as a part of ip release", ip, e); success = false; } - logger.debug("Revoking all " + Purpose.LoadBalancing + " rules as a part of public IP id=" + ipId + " release..."); + logger.debug("Revoking all {} rules as a part of public IP {} release...", Purpose.LoadBalancing, ip); if (!_lbMgr.removeAllLoadBalanacersForIp(ipId, caller, userId)) { - logger.warn("Unable to revoke all the load balancer rules for ip id=" + ipId + " as a part of ip release"); + logger.warn("Unable to revoke all the load balancer rules for ip {} as a part of ip release", ip); success = false; } @@ -707,11 +708,11 @@ protected boolean cleanupIpResources(long ipId, long userId, Account caller) { // conditions // only when ip address failed to be cleaned up as a part of account destroy and was marked as Releasing, this part of // the code would be triggered - logger.debug("Cleaning up remote access vpns as a part of public IP id=" + ipId + " release..."); + logger.debug("Cleaning up remote access vpns as a part of public IP {} release...", ip); try { _vpnMgr.destroyRemoteAccessVpnForIp(ipId, caller,false); } catch (ResourceUnavailableException e) { - logger.warn("Unable to destroy remote access vpn for ip id=" + ipId + " as a part of ip release", e); + logger.warn("Unable to destroy remote access vpn for ip {} as a part of ip release", ip, e); success = false; } @@ -720,8 +721,9 @@ protected boolean cleanupIpResources(long ipId, long userId, Account caller) { @Override @DB 
- public boolean disassociatePublicIpAddress(long addrId, long userId, Account caller) { + public boolean disassociatePublicIpAddress(IpAddress ipAddress, long userId, Account caller) { boolean success = true; + long addrId = ipAddress.getId(); try { IPAddressVO ipToBeDisassociated = _ipAddressDao.acquireInLockTable(addrId); @@ -733,9 +735,9 @@ public boolean disassociatePublicIpAddress(long addrId, long userId, Account cal PublicIpQuarantine publicIpQuarantine = null; // Cleanup all ip address resources - PF/LB/Static nat rules - if (!cleanupIpResources(addrId, userId, caller)) { + if (!cleanupIpResources(ipAddress, userId, caller)) { success = false; - logger.warn("Failed to release resources for ip address id=" + addrId); + logger.warn("Failed to release resources for ip address {}", ipAddress); } IPAddressVO ip = markIpAsUnavailable(addrId); @@ -744,7 +746,7 @@ public boolean disassociatePublicIpAddress(long addrId, long userId, Account cal } if (logger.isDebugEnabled()) { - logger.debug("Releasing ip id=" + addrId + "; sourceNat = " + ip.isSourceNat()); + logger.debug("Releasing ip {}; sourceNat = {}", ip, ip.isSourceNat()); } if (ip.getAssociatedWithNetworkId() != null) { @@ -768,7 +770,7 @@ public boolean disassociatePublicIpAddress(long addrId, long userId, Account cal if (ip.isPortable()) { releasePortableIpAddress(addrId); } - logger.debug("Released a public ip id=" + addrId); + logger.debug("Released a public ip {}", ip); } else if (publicIpQuarantine != null) { removePublicIpAddressFromQuarantine(publicIpQuarantine.getId(), "Public IP address removed from quarantine as there was an error while disassociating it."); } @@ -1061,11 +1063,11 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } } } else { - logger.error("Failed to mark public IP as allocated with id=" + addr.getId() + " address=" + addr.getAddress()); + logger.error("Failed to mark public IP as allocated: {}", addr); } } } else { - logger.error("Failed to acquire row 
lock to mark public IP as allocated with id=" + addr.getId() + " address=" + addr.getAddress()); + logger.error("Failed to acquire row lock to mark public IP as allocated: {}", addr); } } }); @@ -1118,7 +1120,7 @@ public PublicIp doInTransaction(TransactionStatus status) throws InsufficientAdd throw ex; } if (logger.isDebugEnabled()) { - logger.debug("lock account " + ownerId + " is acquired"); + logger.debug("lock account {} is acquired", owner); } List vlanDbIds = null; boolean displayIp = true; @@ -1139,19 +1141,19 @@ public PublicIp doInTransaction(TransactionStatus status) throws InsufficientAdd } }); if (ip.getState() != State.Allocated) { - logger.error("Failed to fetch new IP and allocate it for ip with id=" + ip.getId() + ", address=" + ip.getAddress()); + logger.error("Failed to fetch new IP and allocate it for ip: {}", ip); } return ip; } finally { if (owner != null) { if (logger.isDebugEnabled()) { - logger.debug("Releasing lock account " + ownerId); + logger.debug("Releasing lock account {}", owner); } _accountDao.releaseFromLockTable(ownerId); } if (ip == null) { - logger.error("Unable to get source nat ip address for account " + ownerId); + logger.error("Unable to get source nat ip address for account {}", owner); } } } @@ -1170,13 +1172,13 @@ public boolean applyIpAssociations(Network network, boolean continueOnError) thr markPublicIpAsAllocated(addr); } else if (addr.getState() == IpAddress.State.Releasing) { // Cleanup all the resources for ip address if there are any, and only then un-assign ip in the system - if (cleanupIpResources(addr.getId(), Account.ACCOUNT_ID_SYSTEM, _accountMgr.getSystemAccount())) { + if (cleanupIpResources(addr, Account.ACCOUNT_ID_SYSTEM, _accountMgr.getSystemAccount())) { addPublicIpAddressToQuarantine(addr, network.getDomainId()); _ipAddressDao.unassignIpAddress(addr.getId()); messageBus.publish(_name, MESSAGE_RELEASE_IPADDR_EVENT, PublishScope.LOCAL, addr); } else { success = false; - logger.warn("Failed to release 
resources for ip address id=" + addr.getId()); + logger.warn("Failed to release resources for ip address: {}", addr); } } } @@ -1285,8 +1287,8 @@ public void releasePodIp(Long id) throws CloudRuntimeException { } if (ipVO.getTakenAt() == null) { - logger.debug("Ip Address with id= " + id + " is not allocated, so do nothing."); - throw new CloudRuntimeException("Ip Address with id= " + id + " is not allocated, so do nothing."); + logger.debug("Ip Address {} is not allocated, so do nothing.", ipVO); + throw new CloudRuntimeException(String.format("Ip Address %s is not allocated, so do nothing.", ipVO)); } // Verify permission DataCenter zone = _entityMgr.findById(DataCenter.class, ipVO.getDataCenterId()); @@ -1303,7 +1305,7 @@ public void releasePodIp(Long id) throws CloudRuntimeException { @DB @Override - public IpAddress allocateIp(final Account ipOwner, final boolean isSystem, Account caller, long callerUserId, final DataCenter zone, final Boolean displayIp, final String ipaddress) + public IpAddress allocateIp(final Account ipOwner, final boolean isSystem, Account caller, User callerUser, final DataCenter zone, final Boolean displayIp, final String ipaddress) throws ConcurrentOperationException, InsufficientAddressCapacityException, CloudRuntimeException { final VlanType vlanType = VlanType.VirtualNetwork; @@ -1321,11 +1323,11 @@ public IpAddress allocateIp(final Account ipOwner, final boolean isSystem, Accou Account accountToLock = null; try { if (logger.isDebugEnabled()) { - logger.debug("Associate IP address called by the user " + callerUserId + " account " + ipOwner.getId()); + logger.debug(String.format("Associate IP address called by the user %s account %s", callerUser, ipOwner)); } accountToLock = _accountDao.acquireInLockTable(ipOwner.getId()); if (accountToLock == null) { - logger.warn("Unable to lock account: " + ipOwner.getId()); + logger.warn("Unable to lock account: {}", ipOwner); throw new ConcurrentOperationException("Unable to acquire account 
lock"); } @@ -1355,7 +1357,7 @@ public PublicIp doInTransaction(TransactionStatus status) throws InsufficientAdd CallContext.current().setEventDetails("Ip Id: " + ip.getId()); Ip ipAddress = ip.getAddress(); - logger.debug("Got " + ipAddress + " to assign for account " + ipOwner.getId() + " in zone " + zone.getId()); + logger.debug("Got {} to assign for account {} in zone {}", ipAddress, ipOwner, zone); return ip; } @@ -1504,7 +1506,8 @@ public IPAddressVO associateIPToGuestNetwork(long ipId, long networkId, boolean } if (ipToAssoc.getAssociatedWithNetworkId() != null) { - logger.debug("IP " + ipToAssoc + " is already associated with network id=" + networkId); + logger.debug("IP {} is already associated with network {}", + ipToAssoc::toString, () -> _networksDao.findById(ipToAssoc.getAssociatedWithNetworkId())); return ipToAssoc; } @@ -1666,7 +1669,7 @@ public IPAddressVO disassociatePortableIPToGuestNetwork(long ipId, long networkI } if (ipToAssoc.getAssociatedWithNetworkId() != network.getId()) { - throw new InvalidParameterValueException("IP " + ipToAssoc + " is not associated with network id" + networkId); + throw new InvalidParameterValueException(String.format("IP %s is not associated with network: %s", ipToAssoc, network)); } DataCenter zone = _entityMgr.findById(DataCenter.class, network.getDataCenterId()); @@ -1697,7 +1700,7 @@ public IPAddressVO disassociatePortableIPToGuestNetwork(long ipId, long networkI if (!ipToServices.isEmpty()) { Set services = ipToServices.get(publicIp); if (services != null && !services.isEmpty()) { - throw new InvalidParameterValueException("IP " + ipToAssoc + " has services and rules associated in the network " + networkId); + throw new InvalidParameterValueException(String.format("IP %s has services and rules associated in the network %s", ipToAssoc, network)); } } @@ -1769,7 +1772,7 @@ public void transferPortableIP(final long ipAddrId, long currentNetworkId, long // disassociate portable IP with current network/VPC network 
if (srcNetwork.getVpcId() != null) { - _vpcMgr.unassignIPFromVpcNetwork(ipAddrId, currentNetworkId); + _vpcMgr.unassignIPFromVpcNetwork(ip, srcNetwork); } else { disassociatePortableIPToGuestNetwork(ipAddrId, currentNetworkId); } @@ -1830,12 +1833,12 @@ protected List getIsolatedNetworksWithSourceNATOwnedByAccount @DB public boolean associateIpAddressListToAccount(long userId, final long accountId, final long zoneId, final Long vlanId, final Network guestNetworkFinal) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException, ResourceAllocationException { - final Account owner = _accountMgr.getActiveAccountById(accountId); - if (guestNetworkFinal != null && guestNetworkFinal.getTrafficType() != TrafficType.Guest) { throw new InvalidParameterValueException("Network " + guestNetworkFinal + " is not of a type " + TrafficType.Guest); } + final Account owner = _accountMgr.getActiveAccountById(accountId); + DataCenter zone = _dcDao.findById(zoneId); Ternary, Network> pair = null; try { pair = Transaction.execute(new TransactionCallbackWithException, Network>, Exception>() { @@ -1873,19 +1876,23 @@ public Ternary, Network> doInTransaction(Transa + requiredOfferings.get(0).getTags()); } - logger.debug("Creating network for account " + owner + " from the network offering id=" + requiredOfferings.get(0).getId() - + " as a part of createVlanIpRange process"); + logger.debug("Creating network for account {} from the network" + + " offering [{}] as a part of createVlanIpRange process", owner, requiredOfferings.get(0)); guestNetwork = _networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), owner.getAccountName() + "-network", owner.getAccountName() + "-network", null, null, null, false, null, owner, null, physicalNetwork, zoneId, ACLType.Account, null, null, null, null, true, null, null, null, null, null, null, null, null, null, null, null); if (guestNetwork == null) { - logger.warn("Failed to create default Virtual network for the 
account " + accountId + "in zone " + zoneId); - throw new CloudRuntimeException("Failed to create a Guest Isolated Networks with SourceNAT " - + "service enabled as a part of createVlanIpRange, for the account " + accountId + "in zone " + zoneId); + logger.warn("Failed to create default Virtual network for the account {} in zone {}", owner, zone); + throw new CloudRuntimeException(String.format("Failed to create a" + + " Guest Isolated Networks with SourceNAT service enabled " + + "as a part of createVlanIpRange, for the account %s in " + + "zone %s", owner, zone)); } } else { - throw new CloudRuntimeException("Required network offering id=" + requiredOfferings.get(0).getId() + " is not in " + NetworkOffering.State.Enabled); + throw new CloudRuntimeException(String.format( + "Required network offering [%s] is not in %s state", + requiredOfferings.get(0), NetworkOffering.State.Enabled)); } } @@ -1933,7 +1940,6 @@ public Ternary, Network> doInTransaction(Transa // if the network offering has persistent set to true, implement the network if (createNetwork && requiredOfferings.get(0).isPersistent()) { - DataCenter zone = _dcDao.findById(zoneId); DeployDestination dest = new DeployDestination(zone, null, null, null); Account callerAccount = CallContext.current().getCallingAccount(); UserVO callerUser = _userDao.findById(CallContext.current().getCallingUserId()); @@ -1965,7 +1971,7 @@ public IPAddressVO markIpAsUnavailable(final long addrId) { final IPAddressVO ip = _ipAddressDao.findById(addrId); if (ip.getAllocatedToAccountId() == null && ip.getAllocatedTime() == null) { - logger.trace("Ip address id=" + addrId + " is already released"); + logger.trace("Ip address: {} is already released", ip); return ip; } @@ -2250,11 +2256,11 @@ public boolean handleSystemIpRelease(IpAddress ip) { if (networkId != null) { if (ip.getSystem()) { CallContext ctx = CallContext.current(); - if (!disassociatePublicIpAddress(ip.getId(), ctx.getCallingUserId(), ctx.getCallingAccount())) { - 
logger.warn("Unable to release system ip address id=" + ip.getId()); + if (!disassociatePublicIpAddress(ip, ctx.getCallingUserId(), ctx.getCallingAccount())) { + logger.warn("Unable to release system ip address: {}", ip); success = false; } else { - logger.warn("Successfully released system ip address id=" + ip.getId()); + logger.warn("Successfully released system ip address: {}", ip); } } } @@ -2330,7 +2336,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) throws Insuff //Get ip address from the placeholder and don't allocate a new one if (requestedIpv4 != null && vm.getType() == VirtualMachine.Type.DomainRouter) { - logger.debug("There won't be nic assignment for VR id " + vm.getId() + " in this network " + network); + logger.debug("There won't be nic assignment for VR {} in this network {}", vm, network); } @@ -2421,7 +2427,7 @@ public boolean isUsageHidden(IPAddressVO ip) { networkId = ip.getSourceNetworkId(); } if (networkId == null) { - throw new CloudRuntimeException("No network for IP " + ip.getId()); + throw new CloudRuntimeException(String.format("No network for IP %s", ip)); } NetworkDetailVO networkDetail = _networkDetailsDao.findDetail(networkId, Network.hideIpAddressUsage); return networkDetail != null && "true".equals(networkDetail.getValue()); diff --git a/server/src/main/java/com/cloud/network/Ipv6AddressManagerImpl.java b/server/src/main/java/com/cloud/network/Ipv6AddressManagerImpl.java index 4cee7423cbfc..d4c293ecf88b 100644 --- a/server/src/main/java/com/cloud/network/Ipv6AddressManagerImpl.java +++ b/server/src/main/java/com/cloud/network/Ipv6AddressManagerImpl.java @@ -105,7 +105,7 @@ public String allocateGuestIpv6(Network network, String requestedIpv6) throws In public String acquireGuestIpv6Address(Network network, String requestedIpv6) throws InsufficientAddressCapacityException { if (!_networkModel.areThereIPv6AddressAvailableInNetwork(network.getId())) { throw new InsufficientAddressCapacityException( - 
String.format("There is no IPv6 address available in the network [name=%s, network id=%s]", network.getName(), network.getId()), DataCenter.class, + String.format("There is no IPv6 address available in the network [name=%s, id=%s, uuid=%s]", network.getName(), network.getId(), network.getUuid()), DataCenter.class, network.getDataCenterId()); } @@ -123,7 +123,7 @@ public String acquireGuestIpv6Address(Network network, String requestedIpv6) thr if (ip != null) { State ipState = ip.getState(); if (ipState != State.Free) { - throw new InsufficientAddressCapacityException(String.format("Requested ip address [%s] is not free [ip state=%]", requestedIpv6, ipState), DataCenter.class, + throw new InsufficientAddressCapacityException(String.format("Requested ip address [%s] is not free [ip state=%s]", requestedIpv6, ipState), DataCenter.class, network.getDataCenterId()); } } @@ -158,19 +158,22 @@ public String allocatePublicIp6ForGuestNic(Network network, Long podId, Account protected void checkIfCanAllocateIpv6Address(Network network, String ipv6) throws InsufficientAddressCapacityException { if (isIp6Taken(network, ipv6)) { throw new InsufficientAddressCapacityException( - String.format("The IPv6 address [%s] is already in use in the network [id=%s, name=%s]", ipv6, network.getId(), network.getName()), Network.class, + String.format("The IPv6 address [%s] is already in use in the network [id=%s, uuid=%s, name=%s]", + ipv6, network.getId(), network.getUuid(), network.getName()), Network.class, network.getId()); } if (ipAddressManager.isIpEqualsGatewayOrNetworkOfferingsEmpty(network, ipv6)) { throw new InvalidParameterValueException( - String.format("The network [id=%s] offering is empty or the requested IP address [%s] is equals to the Gateway", network.getId(), ipv6)); + String.format("The network [id=%s, uuid=%s, name=%s] offering is empty or the requested IP address [%s] is equals to the Gateway", + network.getId(), network.getUuid(), network.getName(), ipv6)); } String 
networkIp6Cidr = network.getIp6Cidr(); if (!NetUtils.isIp6InNetwork(ipv6, networkIp6Cidr)) { throw new InvalidParameterValueException( - String.format("The IPv6 address [%s] is not in the network [id=%s, name=%s, ipv6cidr=%s]", ipv6, network.getId(), network.getName(), network.getIp6Cidr())); + String.format("The IPv6 address [%s] is not in the network [id=%s, uuid=%s name=%s, ipv6cidr=%s]", + ipv6, network.getId(), network.getUuid(), network.getName(), network.getIp6Cidr())); } } @@ -210,7 +213,7 @@ public void setNicIp6Address(final NicProfile nic, final DataCenter dc, final Ne setNicPropertiesFromNetwork(nic, network); IPv6Address ipv6addr = NetUtils.EUI64Address(network.getIp6Cidr(), nic.getMacAddress()); - logger.info("Calculated IPv6 address " + ipv6addr + " using EUI-64 for NIC " + nic.getUuid()); + logger.info("Calculated IPv6 address {} using EUI-64 for NIC {}", ipv6addr, nic); nic.setIPv6Address(ipv6addr.toString()); if (nic.getIPv4Address() != null) { diff --git a/server/src/main/java/com/cloud/network/Ipv6ServiceImpl.java b/server/src/main/java/com/cloud/network/Ipv6ServiceImpl.java index d5b3cab44a60..1894fbcfcb0b 100644 --- a/server/src/main/java/com/cloud/network/Ipv6ServiceImpl.java +++ b/server/src/main/java/com/cloud/network/Ipv6ServiceImpl.java @@ -35,6 +35,9 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.network.dao.PhysicalNetworkDao; import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.api.command.admin.network.CreateGuestNetworkIpv6PrefixCmd; import org.apache.cloudstack.api.command.admin.network.DeleteGuestNetworkIpv6PrefixCmd; @@ -118,6 +121,8 @@ public class Ipv6ServiceImpl extends ComponentLifecycleBase implements Ipv6Servi @Inject VlanDao vlanDao; @Inject + DataCenterDao zoneDao; + @Inject DataCenterGuestIpv6PrefixDao dataCenterGuestIpv6PrefixDao; @Inject 
Ipv6GuestPrefixSubnetNetworkMapDao ipv6GuestPrefixSubnetNetworkMapDao; @@ -130,6 +135,8 @@ public class Ipv6ServiceImpl extends ComponentLifecycleBase implements Ipv6Servi @Inject NicDao nicDao; @Inject + PhysicalNetworkDao physicalNetworkDao; + @Inject DomainRouterDao domainRouterDao; @Inject AccountManager accountManager; @@ -159,8 +166,8 @@ private boolean isPublicIpv6PlaceholderNic(NicVO nic) { NicVO nic = nicOptional.get(); Optional vlanOptional = ranges.stream().filter(v -> nic.getIPv6Cidr().equals(v.getIp6Cidr()) && nic.getIPv6Gateway().equals(v.getIp6Gateway())).findFirst(); if (vlanOptional.isEmpty()) { - logger.error(String.format("Public IPv6 placeholder NIC with cidr: %s, gateway: %s for network ID: %d is not present in the allocated VLAN: %s", - nic.getIPv6Cidr(), nic.getIPv6Gateway(),network.getId(), ranges.get(0).getVlanTag())); + logger.error("Public IPv6 placeholder NIC {} with cidr: {}, gateway: {} for network: {} is not present in the allocated VLAN: {}", + nic, nic.getIPv6Cidr(), nic.getIPv6Gateway(), network, ranges.get(0).getVlanTag()); return null; } return new Pair<>(nic.getIPv6Address(), vlanOptional.get()); @@ -205,9 +212,11 @@ private void processPublicIpv6AddressUpdateForVpcTier(final Network network, fin private Pair assignPublicIpv6ToNetworkInternal(Network network, String vlanId, String nicMacAddress) throws InsufficientAddressCapacityException { final List ranges = vlanDao.listIpv6RangeByZoneIdAndVlanId(network.getDataCenterId(), vlanId); if (CollectionUtils.isEmpty(ranges)) { - logger.error(String.format("Unable to find IPv6 address for zone ID: %d, physical network ID: %d, VLAN: %s", network.getDataCenterId(), network.getPhysicalNetworkId(), vlanId)); + DataCenterVO zone = zoneDao.findById(network.getDataCenterId()); + logger.error("Unable to find IPv6 address for zone: {}, physical network: {}, VLAN: {}", + zone, physicalNetworkDao.findById(network.getPhysicalNetworkId()), vlanId); InsufficientAddressCapacityException ex = new 
InsufficientAddressCapacityException("Insufficient address capacity", DataCenter.class, network.getDataCenterId()); - ex.addProxyObject(ApiDBUtils.findZoneById(network.getDataCenterId()).getUuid()); + ex.addProxyObject(zone.getUuid()); throw ex; } Pair placeholderResult = getPublicIpv6FromNetworkPlaceholder(network, ranges); @@ -330,11 +339,11 @@ public Pair getUsedTotalIpv6SubnetForZone(long zoneId) { return new Pair<>(used, total); } - public Pair preAllocateIpv6SubnetForNetwork(long zoneId) throws ResourceAllocationException { + public Pair preAllocateIpv6SubnetForNetwork(DataCenter zone) throws ResourceAllocationException { return Transaction.execute((TransactionCallbackWithException, ResourceAllocationException>) status -> { - List prefixes = dataCenterGuestIpv6PrefixDao.listByDataCenterId(zoneId); + List prefixes = dataCenterGuestIpv6PrefixDao.listByDataCenterId(zone.getId()); if (CollectionUtils.isEmpty(prefixes)) { - logger.error(String.format("IPv6 prefixes not found for the zone ID: %d", zoneId)); + logger.error("IPv6 prefixes not found for the zone: {}", zone); throw new ResourceAllocationException("Unable to allocate IPv6 network", Resource.ResourceType.network); } Ipv6GuestPrefixSubnetNetworkMapVO ip6Subnet = null; @@ -492,7 +501,7 @@ public void updateIpv6RoutesForVpcResponse(Vpc vpc, VpcResponse response) { public void checkNetworkIpv6Upgrade(Network network) throws InsufficientAddressCapacityException, ResourceAllocationException { List prefixes = dataCenterGuestIpv6PrefixDao.listByDataCenterId(network.getDataCenterId()); if (CollectionUtils.isEmpty(prefixes)) { - logger.error(String.format("IPv6 prefixes not found for the zone ID: %d", network.getDataCenterId())); + logger.error("IPv6 prefixes not found for the zone: {}", zoneDao.findById(network.getDataCenterId())); throw new ResourceAllocationException("Unable to allocate IPv6 network", Resource.ResourceType.network); } List addresses = network.getVpcId() == null ? 
@@ -502,9 +511,11 @@ public void checkNetworkIpv6Upgrade(Network network) throws InsufficientAddressC VlanVO vlan = vlanDao.findById(address.getVlanId()); final List ranges = vlanDao.listIpv6RangeByZoneIdAndVlanId(network.getDataCenterId(), vlan.getVlanTag()); if (CollectionUtils.isEmpty(ranges)) { - logger.error(String.format("Unable to find IPv6 address for zone ID: %d, physical network ID: %d, VLAN: %s", network.getDataCenterId(), network.getPhysicalNetworkId(), vlan.getVlanTag())); + DataCenterVO zone = zoneDao.findById(network.getDataCenterId()); + logger.error("Unable to find IPv6 address for zone: {}, physical network: {}, VLAN: {}", + zone, physicalNetworkDao.findById(network.getPhysicalNetworkId()), vlan.getVlanTag()); InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException("Insufficient address capacity", DataCenter.class, network.getDataCenterId()); - ex.addProxyObject(ApiDBUtils.findZoneById(network.getDataCenterId()).getUuid()); + ex.addProxyObject(zone.getUuid()); throw ex; } } @@ -576,7 +587,7 @@ public FirewallRule createIpv6FirewallRule(CreateIpv6FirewallRuleCmd cmd) throws if (!supportedProtocols.contains(protocol.toLowerCase())) { throw new InvalidParameterValueException(String.format("Protocol %s is not supported in zone", protocol)); } else if (!supportedTrafficTypes.contains(trafficType.toString().toLowerCase())) { - throw new InvalidParameterValueException("Traffic Type " + trafficType + " is currently supported by Firewall in network " + networkId); + throw new InvalidParameterValueException(String.format("Traffic Type %s is not currently supported by Firewall in network %s", trafficType, network)); } } @@ -653,9 +664,9 @@ public boolean applyIpv6FirewallRule(long id) { return false; } if (!FirewallRule.Purpose.Ipv6Firewall.equals(rule.getPurpose())) { - logger.error(String.format("Cannot apply IPv6 firewall rule with ID: %d as purpose %s is not %s", id, rule.getPurpose(), FirewallRule.Purpose.Ipv6Firewall)); + 
logger.error("Cannot apply IPv6 firewall rule: {} as purpose {} is not {}", rule, rule.getPurpose(), FirewallRule.Purpose.Ipv6Firewall); } - logger.debug(String.format("Applying IPv6 firewall rules for rule with ID: %s", rule.getUuid())); + logger.debug(String.format("Applying IPv6 firewall rules for rule: %s", rule)); List rules = firewallDao.listByNetworkPurposeTrafficType(rule.getNetworkId(), rule.getPurpose(), FirewallRule.TrafficType.Egress); rules.addAll(firewallDao.listByNetworkPurposeTrafficType(rule.getNetworkId(), FirewallRule.Purpose.Ipv6Firewall, FirewallRule.TrafficType.Ingress)); return firewallManager.applyFirewallRules(rules, false, CallContext.current().getCallingAccount()); diff --git a/server/src/main/java/com/cloud/network/NetworkMigrationManagerImpl.java b/server/src/main/java/com/cloud/network/NetworkMigrationManagerImpl.java index 39546dc90612..2e92acd71558 100644 --- a/server/src/main/java/com/cloud/network/NetworkMigrationManagerImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkMigrationManagerImpl.java @@ -183,7 +183,7 @@ public class NetworkMigrationManagerImpl implements NetworkMigrationManager { @Override public long makeCopyOfNetwork(Network network, NetworkOffering networkOffering, Long vpcId) { if (logger.isDebugEnabled()) { - logger.debug("Making a copy of network with uuid " + network.getUuid() + " and id " + network.getId() + " for migration."); + logger.debug("Making a copy of network {} for migration.", network); } long originalNetworkId = network.getId(); NetworkDomainVO domainNetworkMapByNetworkId = _networkDomainDao.getDomainNetworkMapByNetworkId(originalNetworkId); @@ -235,7 +235,7 @@ public class NetworkMigrationManagerImpl implements NetworkMigrationManager { _networksDao.update(networkCopyId, copiedNetwork); copyNetworkDetails(originalNetworkId, networkCopyId); - copyFirewallRulesToNewNetwork(network, networkCopyId); + copyFirewallRulesToNewNetwork(network, copiedNetwork); 
assignUserNicsToNewNetwork(originalNetworkId, networkCopyId); assignRouterNicsToNewNetwork(network.getId(), networkCopyId); @@ -287,7 +287,7 @@ private void assignUserNicsToNewNetwork(long srcNetworkId, long dstNetworkId) { public Long makeCopyOfVpc(long vpcId, long vpcOfferingId) { VpcVO vpc = _vpcDao.findById(vpcId); if (logger.isDebugEnabled()) { - logger.debug("Making a copy of vpc with uuid " + vpc.getUuid() + " and id " + vpc.getId() + " for migration."); + logger.debug("Making a copy of vpc {} for migration.", vpc); } if (vpc == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Specified vpc id doesn't exist in the system"); @@ -393,11 +393,11 @@ private void copyVpcResourceTagsToNewVpc(long srcVpcId, long dstVpcId){ } } - private void copyFirewallRulesToNewNetwork(Network srcNetwork, long dstNetworkId) { + private void copyFirewallRulesToNewNetwork(Network srcNetwork, Network dstNetwork) { List firewallRules = _firewallDao.listByNetworkPurposeTrafficType(srcNetwork.getId(), FirewallRule.Purpose.Firewall, FirewallRule.TrafficType.Egress); firewallRules.addAll(_firewallDao.listByNetworkPurposeTrafficType(srcNetwork.getId(), FirewallRule.Purpose.Firewall, FirewallRule.TrafficType.Ingress)); if (logger.isDebugEnabled()) { - logger.debug("Copying firewall rules from network with id " + srcNetwork.getId() + " to network with id " + dstNetworkId); + logger.debug("Copying firewall rules from network {} to network {}", srcNetwork, dstNetwork); } //Loop over all the firewall rules in the original network and copy all values to a new firewall rule @@ -408,7 +408,7 @@ private void copyFirewallRulesToNewNetwork(Network srcNetwork, long dstNetworkId originalFirewallRule.getSourcePortStart(), originalFirewallRule.getSourcePortEnd(), originalFirewallRule.getProtocol(), - dstNetworkId, + dstNetwork.getId(), srcNetwork.getAccountId(), srcNetwork.getDomainId(), originalFirewallRule.getPurpose(), @@ -613,7 +613,7 @@ private void 
reapplyPublicIps(Network networkInOldPhysicalNetwork, Network netwo ipAddress.setAssociatedWithNetworkId(networkInNewPhysicalNet.getId()); _ipAddressDao.persist(ipAddress); } else { - _ipAddressManager.disassociatePublicIpAddress(ipAddress.getId(), callerUserId, caller); + _ipAddressManager.disassociatePublicIpAddress(ipAddress, callerUserId, caller); } } diff --git a/server/src/main/java/com/cloud/network/NetworkModelImpl.java b/server/src/main/java/com/cloud/network/NetworkModelImpl.java index 1276ec220679..47225a635dcf 100644 --- a/server/src/main/java/com/cloud/network/NetworkModelImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkModelImpl.java @@ -635,7 +635,7 @@ public boolean canUseForDeploy(Network network) { } } else { if (network.getCidr() == null) { - logger.debug("Network - " + network.getId() + " has NULL CIDR."); + logger.debug("Network - {} has NULL CIDR.", network); return false; } hasFreeIps = (getAvailableIps(network, null)).size() > 0; @@ -979,7 +979,7 @@ public String getIpOfNetworkElementInVirtualNetwork(long accountId, long dataCen List virtualNetworks = _networksDao.listByZoneAndGuestType(accountId, dataCenterId, GuestType.Isolated, false); if (virtualNetworks.isEmpty()) { - logger.trace("Unable to find default Virtual network account id=" + accountId); + logger.trace("Unable to find default Virtual network for account: {}", () -> _accountDao.findById(accountId)); return null; } @@ -990,7 +990,7 @@ public String getIpOfNetworkElementInVirtualNetwork(long accountId, long dataCen if (networkElementNic != null) { return networkElementNic.getIPv4Address(); } else { - logger.warn("Unable to set find network element for the network id=" + virtualNetwork.getId()); + logger.warn("Unable to find network element for the network {}", virtualNetwork); return null; } } @@ -1224,13 +1224,14 @@ private Long getPhysicalNetworkId(long zoneId, List pNtwks, S Long pNtwkId = null; for (PhysicalNetwork pNtwk : pNtwks) { if (tag == null && 
pNtwk.getTags().isEmpty()) { - logger.debug("Found physical network id=" + pNtwk.getId() + " with null tag"); + logger.debug("Found physical network {} with null tag", pNtwk); if (pNtwkId != null) { - throw new CloudRuntimeException("There is more than 1 physical network with empty tag in the zone id=" + zoneId); + throw new CloudRuntimeException(String.format("There is more than 1 physical" + + " network with empty tag in the zone %s", _dcDao.findById(zoneId))); } pNtwkId = pNtwk.getId(); } else if (tag != null && pNtwk.getTags().contains(tag)) { - logger.debug("Found physical network id=" + pNtwk.getId() + " based on requested tags " + tag); + logger.debug("Found physical network {} based on requested tags {}", pNtwk, tag); pNtwkId = pNtwk.getId(); break; } @@ -1275,7 +1276,7 @@ public boolean isSecurityGroupSupportedInNetwork(Network network) { physicalNetworkId = findPhysicalNetworkId(network.getDataCenterId(), null, null); } - return isServiceEnabledInNetwork(physicalNetworkId, network.getId(), SecurityGroup); + return isServiceEnabledInNetwork(physicalNetworkId, network, SecurityGroup); } @Override @@ -1327,8 +1328,9 @@ public String getDefaultManagementTrafficLabel(long zoneId, HypervisorType hyper } } catch (Exception ex) { if (logger.isDebugEnabled()) { - logger.debug("Failed to retrive the default label for management traffic:" + "zone: " + zoneId + " hypervisor: " + hypervisorType + " due to:" + - ex.getMessage()); + logger.debug("Failed to retrieve the default label for management " + + "traffic: zone: {} hypervisor: {} due to: {}", + () -> _dcDao.findById(zoneId), hypervisorType::toString, ex::getMessage); } } return null; @@ -1356,8 +1358,9 @@ public String getDefaultStorageTrafficLabel(long zoneId, HypervisorType hypervis } } catch (Exception ex) { if (logger.isDebugEnabled()) { - logger.debug("Failed to retrive the default label for storage traffic:" + "zone: " + zoneId + " hypervisor: " + hypervisorType + " due to:" + - ex.getMessage()); + 
logger.debug("Failed to retrieve the default label for storage " + "traffic: zone: {} hypervisor: {} due to: {}", + () -> _dcDao.findById(zoneId), hypervisorType::toString, ex::getMessage); } } return null; @@ -1393,7 +1396,8 @@ public List getPhysicalNetworkInfo(long dcId, Hypervis public boolean isProviderEnabledInPhysicalNetwork(long physicalNetowrkId, String providerName) { PhysicalNetworkServiceProviderVO ntwkSvcProvider = _pNSPDao.findByServiceProvider(physicalNetowrkId, providerName); if (ntwkSvcProvider == null) { - logger.warn("Unable to find provider " + providerName + " in physical network id=" + physicalNetowrkId); + logger.warn("Unable to find provider {} in physical network {}", + providerName::toString, () -> _physicalNetworkDao.findById(physicalNetowrkId)); return false; } return isProviderEnabled(ntwkSvcProvider); @@ -1697,7 +1701,7 @@ public final void checkNetworkPermissions(Account caller, Network network) { if (network == null) { throw new CloudRuntimeException("cannot check permissions on (Network) "); } - logger.info(String.format("Checking permission for account %s (%s) on network %s (%s)", caller.getAccountName(), caller.getUuid(), network.getName(), network.getUuid())); + logger.info("Checking permission for account {} on network {}", caller, network); if (network.getGuestType() != GuestType.Shared || network.getAclType() == ACLType.Account) { checkAccountNetworkPermissions(caller, network); @@ -1837,7 +1841,7 @@ private void checkSharedNetworkOperatePermissions(Account owner, Network network throw new PermissionDeniedException(String.format("Shared network %s belongs to domain cannot be operated by normal user %s", network, owner)); } } else if (owner.getType() != Account.Type.ADMIN) { - throw new PermissionDeniedException(String.format("Shared network %s cannot be operated by account %s with type = %d", network, owner, owner.getType())); + throw new PermissionDeniedException(String.format("Shared network %s cannot be operated by account 
%s with type = %s", network, owner, owner.getType())); } } @@ -1879,8 +1883,9 @@ public String getDefaultPublicTrafficLabel(long dcId, HypervisorType hypervisorT } } catch (Exception ex) { if (logger.isDebugEnabled()) { - logger.debug("Failed to retrieve the default label for public traffic." + "zone: " + dcId + " hypervisor: " + hypervisorType + " due to: " + - ex.getMessage()); + logger.debug("Failed to retrieve the default label for public " + + "traffic. zone: {} hypervisor: {} due to: {}", + () -> _dcDao.findById(dcId), hypervisorType::toString, ex::getMessage); } } return null; @@ -1908,8 +1913,9 @@ public String getDefaultGuestTrafficLabel(long dcId, HypervisorType hypervisorTy } } catch (Exception ex) { if (logger.isDebugEnabled()) { - logger.debug("Failed to retrive the default label for guest traffic:" + "zone: " + dcId + " hypervisor: " + hypervisorType + " due to:" + - ex.getMessage()); + logger.debug("Failed to retrieve the default label for guest " + + "traffic: zone: {} hypervisor: {} due to:{}", + () -> _dcDao.findById(dcId), hypervisorType::toString, ex::getMessage); } } return null; @@ -1981,13 +1987,13 @@ public boolean isNetworkAvailableInDomain(long networkId, long domainId) { Long networkDomainId = null; Network network = getNetwork(networkId); if (network.getGuestType() != GuestType.Shared) { - logger.trace("Network id=" + networkId + " is not shared"); + logger.trace("Network {} is not shared", network); return false; } NetworkDomainVO networkDomainMap = _networkDomainDao.getDomainNetworkMapByNetworkId(networkId); if (networkDomainMap == null) { - logger.trace("Network id=" + networkId + " is shared, but not domain specific"); + logger.trace("Network {} is shared, but not domain specific", network); return true; } else { networkDomainId = networkDomainMap.getDomainId(); @@ -2077,17 +2083,18 @@ boolean isProviderEnabled(PhysicalNetworkServiceProvider provider) { return true; } - boolean isServiceEnabledInNetwork(long physicalNetworkId, long 
networkId, Service service) { + boolean isServiceEnabledInNetwork(long physicalNetworkId, Network network, Service service) { // check if the service is supported in the network - if (!areServicesSupportedInNetwork(networkId, service)) { - logger.debug("Service " + service.getName() + " is not supported in the network id=" + networkId); + if (!areServicesSupportedInNetwork(network.getId(), service)) { + logger.debug("Service {} is not supported in the network {}", service.getName(), network); return false; } // get provider for the service and check if all of them are supported - String provider = _ntwkSrvcDao.getProviderForServiceInNetwork(networkId, service); + String provider = _ntwkSrvcDao.getProviderForServiceInNetwork(network.getId(), service); if (!isProviderEnabledInPhysicalNetwork(physicalNetworkId, provider)) { - logger.debug("Provider " + provider + " is not enabled in physical network id=" + physicalNetworkId); + logger.debug("Provider {} is not enabled in physical network {}", + provider::toString, () -> _physicalNetworkDao.findById(physicalNetworkId)); return false; } @@ -2111,7 +2118,8 @@ PhysicalNetwork getOnePhysicalNetworkByZoneAndTrafficType(long zoneId, TrafficTy } if (networkList.size() > 1) { - logger.info("More than one physical networks exist in zone id=" + zoneId + " with traffic type=" + trafficType + ". "); + logger.info("More than one physical networks exist in zone {} with traffic type {}", + () -> _dcDao.findById(zoneId), trafficType::toString); } return networkList.get(0); @@ -2568,7 +2576,7 @@ public boolean isNetworkReadyForGc(long networkId) { // The active nics count (nics_count in op_networks table) might be wrong due to some reasons, should check the state of vms as well. 
// (nics for Starting VMs might not be allocated yet as Starting state also used when vm is being Created) if (_nicDao.countNicsForNonStoppedVms(networkId) > 0 || _nicDao.countNicsForNonStoppedRunningVrs(networkId) > 0) { - logger.debug("Network id=" + networkId + " is not ready for GC as it has vms that are not Stopped at the moment"); + logger.debug("Network {} is not ready for GC as it has vms that are not Stopped at the moment", network); return false; } diff --git a/server/src/main/java/com/cloud/network/NetworkServiceImpl.java b/server/src/main/java/com/cloud/network/NetworkServiceImpl.java index ffb0be834948..c6628e457dec 100644 --- a/server/src/main/java/com/cloud/network/NetworkServiceImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkServiceImpl.java @@ -717,7 +717,7 @@ public IpAddress allocateIP(Account ipOwner, long zoneId, Long networkId, Boolea throws ResourceAllocationException, InsufficientAddressCapacityException, ConcurrentOperationException { Account caller = CallContext.current().getCallingAccount(); - long callerUserId = CallContext.current().getCallingUserId(); + User callerUser = CallContext.current().getCallingUser(); DataCenter zone = _entityMgr.findById(DataCenter.class, zoneId); if (networkId != null) { @@ -735,9 +735,9 @@ public IpAddress allocateIP(Account ipOwner, long zoneId, Long networkId, Boolea if (isSharedNetworkOfferingWithServices(network.getNetworkOfferingId())) { _accountMgr.checkAccess(caller, AccessType.UseEntry, false, network); if (logger.isDebugEnabled()) { - logger.debug("Associate IP address called by the user " + callerUserId + " account " + ipOwner.getId()); + logger.debug("Associate IP address called by the user {} account {}", callerUser, ipOwner); } - return _ipAddrMgr.allocateIp(ipOwner, false, caller, callerUserId, zone, displayIp, ipaddress); + return _ipAddrMgr.allocateIp(ipOwner, false, caller, callerUser, zone, displayIp, ipaddress); } else { throw new InvalidParameterValueException("Associate IP 
address can only be called on the shared networks in the advanced zone" + " with Firewall/Source Nat/Static Nat/Port Forwarding/Load balancing services enabled"); @@ -748,7 +748,7 @@ public IpAddress allocateIP(Account ipOwner, long zoneId, Long networkId, Boolea _accountMgr.checkAccess(caller, null, false, ipOwner); } - IpAddress address = _ipAddrMgr.allocateIp(ipOwner, false, caller, callerUserId, zone, displayIp, ipaddress); + IpAddress address = _ipAddrMgr.allocateIp(ipOwner, false, caller, callerUser, zone, displayIp, ipaddress); if (address != null) { CallContext.current().putContextParameter(IpAddress.class, address.getUuid()); } @@ -782,7 +782,7 @@ public IpAddress allocatePortableIP(Account ipOwner, int regionId, Long zoneId, if (isSharedNetworkOfferingWithServices(network.getNetworkOfferingId())) { _accountMgr.checkAccess(caller, AccessType.UseEntry, false, network); if (logger.isDebugEnabled()) { - logger.debug("Associate IP address called by the user " + callerUserId + " account " + ipOwner.getId()); + logger.debug("Associate IP address called by the user {} account {}", CallContext.current().getCallingUser(), ipOwner); } return _ipAddrMgr.allocatePortableIp(ipOwner, caller, zoneId, networkId, null); } else { @@ -910,7 +910,7 @@ public NicSecondaryIp allocateSecondaryGuestIP(final long nicId, IpAddresses req int maxAllowedIpsPerNic = NumbersUtil.parseInt(_configDao.getValue(Config.MaxNumberOfSecondaryIPsPerNIC.key()), 10); Long nicWiseIpCount = _nicSecondaryIpDao.countByNicId(nicId); if (nicWiseIpCount.intValue() >= maxAllowedIpsPerNic) { - logger.error("Maximum Number of Ips \"vm.network.nic.max.secondary.ipaddresses = \"" + maxAllowedIpsPerNic + " per Nic has been crossed for the nic " + nicId + "."); + logger.error("Maximum Number of Ips \"vm.network.nic.max.secondary.ipaddresses = \"{} per Nic has been crossed for the nic {}.", maxAllowedIpsPerNic, nicVO); throw new InsufficientAddressCapacityException("Maximum Number of Ips per Nic has been 
crossed.", Nic.class, nicId); } @@ -945,10 +945,10 @@ public NicSecondaryIp allocateSecondaryGuestIP(final long nicId, IpAddresses req ipaddr = _ipAddrMgr.allocatePublicIpForGuestNic(network, podId, ipOwner, ipv4Address); } if (ipaddr == null && ipv6Address == null) { - throw new InvalidParameterValueException("Allocating ip to guest nic " + nicId + " failed"); + throw new InvalidParameterValueException(String.format("Allocating ip to guest nic %s failed", nicVO)); } } catch (InsufficientAddressCapacityException e) { - logger.error("Allocating ip to guest nic " + nicId + " failed"); + logger.error("Allocating ip to guest nic {} failed", nicVO); return null; } } else { @@ -1017,7 +1017,7 @@ public boolean releaseSecondaryIpFromNic(long ipAddressId) { NetworkOfferingVO ntwkOff = _networkOfferingDao.findById(network.getNetworkOfferingId()); Long nicId = secIpVO.getNicId(); - logger.debug("ip id = " + ipAddressId + " nic id = " + nicId); + logger.debug("ip = {} nic = {}", secIpVO::toString, () -> _nicDao.findById(nicId)); //check is this the last secondary ip for NIC List ipList = _nicSecondaryIpDao.listByNicId(nicId); boolean lastIp = false; @@ -1031,7 +1031,7 @@ public boolean releaseSecondaryIpFromNic(long ipAddressId) { throw new InvalidParameterValueException("Invalid zone Id is given"); } - logger.debug("Calling secondary ip " + secIpVO.getIp4Address() + " release "); + logger.debug("Calling secondary ip {} release ", secIpVO); if (dc.getNetworkType() == NetworkType.Advanced && network.getGuestType() == Network.GuestType.Isolated) { //check PF or static NAT is configured on this IP address String secondaryIp = secIpVO.getIp4Address(); @@ -1048,8 +1048,8 @@ public boolean releaseSecondaryIpFromNic(long ipAddressId) { //check if the secondary IP associated with any static nat rule IPAddressVO publicIpVO = _ipAddressDao.findByIpAndNetworkId(secIpVO.getNetworkId(), secondaryIp); if (publicIpVO != null) { - logger.debug("VM nic IP " + secondaryIp + " is associated 
with the static NAT rule public IP address id " + publicIpVO.getId()); - throw new InvalidParameterValueException("Can' remove the ip " + secondaryIp + "is associate with static NAT rule public IP address id " + publicIpVO.getId()); + logger.debug("VM nic IP {} is associated with the static NAT rule public IP address id {}", secondaryIp, publicIpVO); + throw new InvalidParameterValueException(String.format("Can' remove the ip %s is associate with static NAT rule public IP address id %s", secondaryIp, publicIpVO)); } if (_loadBalancerDao.isLoadBalancerRulesMappedToVmGuestIp(vm.getId(), secondaryIp, network.getId())) { @@ -1269,7 +1269,7 @@ private boolean releaseIpAddressInternal(long ipAddressId) throws InsufficientAd } if (ipVO.getAllocatedTime() == null) { - logger.debug("Ip Address id= " + ipAddressId + " is not allocated, so do nothing."); + logger.debug("Ip Address {} is not allocated, so do nothing.", ipVO); return true; } @@ -1310,7 +1310,7 @@ private boolean releaseIpAddressInternal(long ipAddressId) throws InsufficientAd return true; } - boolean success = _ipAddrMgr.disassociatePublicIpAddress(ipAddressId, userId, caller); + boolean success = _ipAddrMgr.disassociatePublicIpAddress(ipVO, userId, caller); if (success) { _resourceTagDao.removeByIdAndType(ipAddressId, ResourceObjectType.PublicIpAddress); @@ -1323,7 +1323,7 @@ private boolean releaseIpAddressInternal(long ipAddressId) throws InsufficientAd } } } else { - logger.warn("Failed to release public ip address id=" + ipAddressId); + logger.warn("Failed to release public ip address {}", ipVO); } return success; } @@ -1668,7 +1668,7 @@ public Network createGuestNetwork(CreateNetworkCmd cmd) throws InsufficientCapac if (zone.getNetworkType() == NetworkType.Advanced && ntwkOff.getGuestType() == GuestType.Isolated) { ipv6 = _networkOfferingDao.isIpv6Supported(ntwkOff.getId()); if (ipv6) { - ip6GatewayCidr = ipv6Service.preAllocateIpv6SubnetForNetwork(zone.getId()); + ip6GatewayCidr = 
ipv6Service.preAllocateIpv6SubnetForNetwork(zone); ip6Gateway = ip6GatewayCidr.first(); ip6Cidr = ip6GatewayCidr.second(); } @@ -2292,7 +2292,7 @@ public Network doInTransaction(TransactionStatus status) throws InsufficientCapa Long aclVpcId = acl.getVpcId(); if (!isDefaultAcl(aclId) && isAclAttachedToVpc(aclVpcId, vpcId)) { - throw new InvalidParameterValueException(String.format("ACL [%s] does not belong to the VPC [%s].", aclId, aclVpcId)); + throw new InvalidParameterValueException(String.format("ACL [%s] does not belong to the VPC [%s].", acl, aclVpcId)); } } network = _vpcMgr.createVpcGuestNetwork(networkOfferingId, name, displayText, gateway, cidr, vlanId, networkDomain, owner, sharedDomainId, pNtwk, zoneId, aclType, @@ -2913,9 +2913,9 @@ public boolean restartNetwork(NetworkVO network, boolean cleanup, boolean makeRe long id = network.getId(); boolean success = _networkMgr.restartNetwork(id, callerAccount, user, cleanup, livePatch); if (success) { - logger.debug(String.format("Network id=%d is restarted successfully.",id)); + logger.debug("Network {} is restarted successfully.", network); } else { - logger.warn(String.format("Network id=%d failed to restart.",id)); + logger.warn("Network {} failed to restart.", network); } return success; @@ -3239,7 +3239,7 @@ public Network updateGuestNetwork(final UpdateNetworkCmd cmd) { String isUpdateDnsSupported = dnsCapabilities.get(Capability.AllowDnsSuffixModification); if (isUpdateDnsSupported == null || !Boolean.valueOf(isUpdateDnsSupported)) { // TBD: use uuid instead of networkOfferingId. May need to hardcode tablename in call to addProxyObject(). 
- throw new InvalidParameterValueException("Domain name change is not supported by the network offering id=" + networkOfferingId); + throw new InvalidParameterValueException(String.format("Domain name change is not supported by the network offering %s", networkOffering)); } network.setNetworkDomain(domainSuffix); @@ -3427,7 +3427,7 @@ public Network updateGuestNetwork(final UpdateNetworkCmd cmd) { if (restartNetwork) { if (validStateToShutdown) { if (!changeCidr) { - logger.debug("Shutting down elements and resources for network id=" + networkId + " as a part of network update"); + logger.debug("Shutting down elements and resources for network {} as a part of network update", network); if (!_networkMgr.shutdownNetworkElementsAndResources(context, true, network)) { logger.warn("Failed to shutdown the network elements and resources as a part of network restart: " + network); @@ -3437,12 +3437,11 @@ public Network updateGuestNetwork(final UpdateNetworkCmd cmd) { } } else { // We need to shutdown the network, since we want to re-implement the network. 
- logger.debug("Shutting down network id=" + networkId + " as a part of network update"); + logger.debug("Shutting down network {} as a part of network update", network); //check if network has reservation if (NetUtils.isNetworkAWithinNetworkB(network.getCidr(), network.getNetworkCidr())) { - logger.warn( - "Existing IP reservation will become ineffective for the network with id = " + networkId + " You need to reapply reservation after network reimplementation."); + logger.warn("Existing IP reservation will become ineffective for the network {} You need to reapply reservation after network reimplementation.", network); //set cidr to the network cidr network.setCidr(network.getNetworkCidr()); //set networkCidr to null to bring network back to no IP reservation state @@ -3494,7 +3493,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { long vmId = nic.getInstanceId(); VMInstanceVO vm = _vmDao.findById(vmId); if (vm == null) { - logger.error("Vm for nic " + nic.getId() + " not found with Vm Id:" + vmId); + logger.error("Vm for nic {} not found with Vm Id: {}", nic, vmId); continue; } long isDefault = (nic.isDefaultNic()) ? 
1 : 0; @@ -3685,7 +3684,7 @@ private void updateNetworkIpv6(NetworkVO network, Long networkOfferingId) { } else if (!isIpv6Supported && isIpv6SupportedNew) { Pair ip6GatewayCidr; try { - ip6GatewayCidr = ipv6Service.preAllocateIpv6SubnetForNetwork(network.getDataCenterId()); + ip6GatewayCidr = ipv6Service.preAllocateIpv6SubnetForNetwork(_dcDao.findById(network.getDataCenterId())); ipv6Service.assignIpv6SubnetToNetwork(ip6GatewayCidr.second(), network.getId()); } catch (ResourceAllocationException ex) { throw new CloudRuntimeException("unable to allocate IPv6 network", ex); @@ -3937,7 +3936,8 @@ private void verifyAlreadyMigratedTiers(long migratedVpcId, long vpcOfferingId, Vpc migratedVpc = _vpcDao.findById(migratedVpcId); if (migratedVpc.getVpcOfferingId() != vpcOfferingId) { logger.error("The vpc is already partially migrated in a previous run. The provided vpc offering is not the same as the one used during the first migration process."); - throw new InvalidParameterValueException("Failed to resume migrating VPC as VPC offering does not match previously specified VPC offering (" + migratedVpc.getVpcOfferingId() + ")"); + throw new InvalidParameterValueException(String.format("Failed to resume migrating VPC as VPC offering does not match previously specified VPC offering (%s)", + _vpcOfferingDao.findById(migratedVpc.getVpcOfferingId()))); } List migratedTiers = _networksDao.listByVpc(migratedVpcId); @@ -4041,19 +4041,19 @@ private boolean canMoveToPhysicalNetwork(Network network, long oldNetworkOfferin // Type of the network should be the same if (oldNetworkOffering.getGuestType() != newNetworkOffering.getGuestType()) { - logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " are of different types, can't upgrade"); + logger.debug("Network offerings {} and {} are of different types, can't upgrade", newNetworkOffering, oldNetworkOffering); return false; } // Traffic types should be the same if 
(oldNetworkOffering.getTrafficType() != newNetworkOffering.getTrafficType()) { - logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different traffic types, can't upgrade"); + logger.debug("Network offerings {} and {} have different traffic types, can't upgrade", newNetworkOffering, oldNetworkOffering); return false; } // specify ipRanges should be the same if (oldNetworkOffering.isSpecifyIpRanges() != newNetworkOffering.isSpecifyIpRanges()) { - logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different values for specifyIpRangess, can't upgrade"); + logger.debug("Network offerings {} and {} have different values for specifyIpRangess, can't upgrade", newNetworkOffering, oldNetworkOffering); return false; } @@ -4088,26 +4088,26 @@ protected boolean canUpgrade(Network network, long oldNetworkOfferingId, long ne // security group service should be the same if (areServicesSupportedByNetworkOffering(oldNetworkOfferingId, Service.SecurityGroup) != areServicesSupportedByNetworkOffering(newNetworkOfferingId, Service.SecurityGroup)) { - logger.debug("Offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different securityGroupProperty, can't upgrade"); + logger.debug("Offerings {} and {} have different securityGroupProperty, can't upgrade", newNetworkOffering, oldNetworkOffering); return false; } // tags should be the same if (newNetworkOffering.getTags() != null) { if (oldNetworkOffering.getTags() == null) { - logger.debug("New network offering id=" + newNetworkOfferingId + " has tags and old network offering id=" + oldNetworkOfferingId + " doesn't, can't upgrade"); + logger.debug("New network offering id={} has tags and old network offering id={} doesn't, can't upgrade", newNetworkOffering, oldNetworkOffering); return false; } if (!com.cloud.utils.StringUtils.areTagsEqual(oldNetworkOffering.getTags(), newNetworkOffering.getTags())) { - 
logger.debug("Network offerings " + newNetworkOffering.getUuid() + " and " + oldNetworkOffering.getUuid() + " have different tags, can't upgrade"); + logger.debug("Network offerings {} and {} have different tags, can't upgrade", newNetworkOffering, oldNetworkOffering); return false; } } // specify vlan should be the same if (oldNetworkOffering.isSpecifyVlan() != newNetworkOffering.isSpecifyVlan()) { - logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different values for specifyVlan, can't upgrade"); + logger.debug("Network offerings {} and {} have different values for specifyVlan, can't upgrade", newNetworkOffering, oldNetworkOffering); return false; } @@ -4115,7 +4115,7 @@ protected boolean canUpgrade(Network network, long oldNetworkOfferingId, long ne NetworkOffering.NetworkMode oldNetworkMode = oldNetworkOffering.getNetworkMode() == null ? NetworkOffering.NetworkMode.NATTED: oldNetworkOffering.getNetworkMode(); NetworkOffering.NetworkMode newNetworkMode = newNetworkOffering.getNetworkMode() == null ? NetworkOffering.NetworkMode.NATTED: newNetworkOffering.getNetworkMode(); if (!oldNetworkMode.equals(newNetworkMode)) { - logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different values for network mode, can't upgrade"); + logger.debug("Network offerings {} and {} have different values for network mode, can't upgrade", newNetworkOffering, oldNetworkOffering); return false; } @@ -4140,7 +4140,7 @@ public PhysicalNetwork createPhysicalNetwork(final Long zoneId, final String vne if (Grouping.AllocationState.Enabled == zone.getAllocationState()) { // TBD: Send uuid instead of zoneId; may have to hardcode tablename in call to addProxyObject(). 
- throw new PermissionDeniedException("Cannot create PhysicalNetwork since the Zone is currently enabled, zone Id: " + zoneId); + throw new PermissionDeniedException(String.format("Cannot create PhysicalNetwork since the Zone is currently enabled, zone: %s", zone)); } NetworkType zoneType = zone.getNetworkType(); @@ -4148,7 +4148,7 @@ public PhysicalNetwork createPhysicalNetwork(final Long zoneId, final String vne if (zoneType == NetworkType.Basic) { if (!_physicalNetworkDao.listByZone(zoneId).isEmpty()) { // TBD: Send uuid instead of zoneId; may have to hardcode tablename in call to addProxyObject(). - throw new CloudRuntimeException("Cannot add the physical network to basic zone id: " + zoneId + ", there is a physical network already existing in this basic Zone"); + throw new CloudRuntimeException(String.format("Cannot add the physical network to basic zone: %s, there is a physical network already existing in this basic Zone", zone)); } } if (tags != null && tags.size() > 1) { @@ -4399,15 +4399,14 @@ public void addOrRemoveVnets(String[] listOfRanges, final PhysicalNetworkVO netw Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { + DataCenterVO zone = _dcDao.findById(network.getDataCenterId()); if (addVnetsFinal != null) { - logger.debug("Adding vnet range " + addVnetsFinal.toString() + " for the physicalNetwork id= " + network.getId() + " and zone id=" + network.getDataCenterId() - + " as a part of updatePhysicalNetwork call"); + logger.debug("Adding vnet range {} for the physicalNetwork {} and zone {} as a part of updatePhysicalNetwork call", addVnetsFinal.toString(), network, zone); //add vnet takes a list of strings to be added. each string is a vnet. 
_dcDao.addVnet(network.getDataCenterId(), network.getId(), addVnetsFinal); } if (removeVnetsFinal != null) { - logger.debug("removing vnet range " + removeVnetsFinal.toString() + " for the physicalNetwork id= " + network.getId() + " and zone id=" + network.getDataCenterId() - + " as a part of updatePhysicalNetwork call"); + logger.debug("removing vnet range {} for the physicalNetwork {} and zone {} as a part of updatePhysicalNetwork call", removeVnetsFinal.toString(), network, zone); //deleteVnets takes a list of strings to be removed. each string is a vnet. _dcVnetDao.deleteVnets(TransactionLegacy.currentTxn(), network.getDataCenterId(), network.getId(), removeVnetsFinal); } @@ -4443,14 +4442,15 @@ private List> validateVlanRange(PhysicalNetworkVO network maxVnet = MAX_VXLAN_VNI; // fail if zone already contains VNI, need to be unique per zone. // since adding a range adds each VNI to the database, need only check min/max + DataCenterVO zone = _dcDao.findById(network.getDataCenterId()); for (String vnet : VnetRange) { - logger.debug("Looking to see if VNI " + vnet + " already exists on another network in zone " + network.getDataCenterId()); + logger.debug("Looking to see if VNI {} already exists on another network in zone {}", vnet, zone); List vnis = _dcVnetDao.findVnet(network.getDataCenterId(), vnet); if (vnis != null && !vnis.isEmpty()) { for (DataCenterVnetVO vni : vnis) { if (vni.getPhysicalNetworkId() != network.getId()) { - logger.debug("VNI " + vnet + " already exists on another network in zone, please specify a unique range"); - throw new InvalidParameterValueException("VNI " + vnet + " already exists on another network in zone, please specify a unique range"); + logger.debug("VNI {} already exists on another network in zone ({}), please specify a unique range", vnet, zone); + throw new InvalidParameterValueException(String.format("VNI %s already exists on another network in zone (%s), please specify a unique range", vnet, zone)); } } } @@ -4489,9 
+4489,10 @@ private List> validateVlanRange(PhysicalNetworkVO network } public void validateIfServiceOfferingIsActiveAndSystemVmTypeIsDomainRouter(final Long serviceOfferingId) { - logger.debug(String.format("Validating if service offering [%s] is active, and if system VM is of Domain Router type.", serviceOfferingId)); final ServiceOfferingVO serviceOffering = serviceOfferingDao.findById(serviceOfferingId); + logger.debug(String.format("Validating if service offering (%s) with id %d is active, and if system VM is of Domain Router type.", serviceOffering, serviceOfferingId)); + if (serviceOffering == null) { throw new InvalidParameterValueException(String.format("Could not find specified service offering [%s].", serviceOfferingId)); } @@ -4545,7 +4546,7 @@ private List getVnetsToremove(PhysicalNetworkVO network, List 0) { - throw new InvalidParameterValueException("physicalnetwork " + network.getId() + " has " + allocated_vnets + " vnets in use"); + throw new InvalidParameterValueException(String.format("physicalnetwork %s has %d vnets in use", network, allocated_vnets)); } return removeVnets; } @@ -4569,7 +4570,7 @@ private List getVnetsToremove(PhysicalNetworkVO network, List result = _dcVnetDao.listAllocatedVnetsInRange(network.getDataCenterId(), network.getId(), start, end); if (!result.isEmpty()) { - throw new InvalidParameterValueException("physicalnetwork " + network.getId() + " has allocated vnets in the range " + start + "-" + end); + throw new InvalidParameterValueException(String.format("physicalnetwork %s has allocated vnets in the range %d-%d", network, start, end)); } // If the range is partially dedicated to an account fail the request @@ -4640,11 +4641,8 @@ private boolean deleteProviders() { for (PhysicalNetworkServiceProviderVO provider : providers) { try { deleteNetworkServiceProvider(provider.getId()); - } catch (ResourceUnavailableException e) { - logger.warn("Unable to complete destroy of the physical network provider: " + 
provider.getProviderName() + ", id: " + provider.getId(), e); - return false; - } catch (ConcurrentOperationException e) { - logger.warn("Unable to complete destroy of the physical network provider: " + provider.getProviderName() + ", id: " + provider.getId(), e); + } catch (ResourceUnavailableException | ConcurrentOperationException e) { + logger.warn("Unable to complete destroy of the physical network provider: {}", provider, e); return false; } } @@ -4785,7 +4783,7 @@ public GuestVlanRange dedicateGuestVlanRange(DedicateGuestVlanRangeCmd cmd) { } else if (!physicalNetwork.getIsolationMethods().isEmpty() && !physicalNetwork.getIsolationMethods().contains("VLAN") && !physicalNetwork.getIsolationMethods().contains("VXLAN")) { - throw new InvalidParameterValueException("Cannot dedicate guest vlan range. " + "Physical isolation type of network " + physicalNetworkId + " is not VLAN nor VXLAN"); + throw new InvalidParameterValueException(String.format("Cannot dedicate guest vlan range. Physical isolation type of network %s is not VLAN nor VXLAN", physicalNetwork)); } // Get the start and end vlan @@ -5052,8 +5050,7 @@ public PhysicalNetworkServiceProvider addProviderToPhysicalNetwork(Long physical } if (_pNSPDao.findByServiceProvider(physicalNetworkId, providerName) != null) { - // TBD: send uuid instead of physicalNetworkId. 
- throw new CloudRuntimeException("The '" + providerName + "' provider already exists on physical network : " + physicalNetworkId); + throw new CloudRuntimeException(String.format("The '%s' provider already exists on physical network : %s", providerName, network)); } // check if services can be turned off @@ -5164,7 +5161,8 @@ public PhysicalNetworkServiceProvider updateNetworkServiceProvider(Long id, Stri if (state != null) { if (logger.isDebugEnabled()) { - logger.debug("trying to update the state of the service provider id=" + id + " on physical network: " + provider.getPhysicalNetworkId() + " to state: " + stateStr); + logger.debug("trying to update the state of the service provider {} on physical network: {} to state: {}", + provider::toString, () -> _physicalNetworkDao.findById(provider.getPhysicalNetworkId()), stateStr::toString); } switch (state) { case Enabled: @@ -5231,7 +5229,8 @@ public boolean deleteNetworkServiceProvider(Long id) throws ConcurrentOperationE // shutdown the provider instances ReservationContext context = new ReservationContextImpl(null, null, callerUser, callerAccount); if (logger.isDebugEnabled()) { - logger.debug("Shutting down the service provider id=" + id + " on physical network: " + provider.getPhysicalNetworkId()); + logger.debug("Shutting down the service provider {} on physical network: {}", + provider::toString, () -> _physicalNetworkDao.findById(provider.getPhysicalNetworkId())); } NetworkElement element = _networkModel.getElementImplementingProvider(provider.getProviderName()); if (element == null) { @@ -5291,8 +5290,8 @@ private void checkForPhysicalNetworksWithoutTag(PhysicalNetworkVO physicalNetwor } if (networkWithoutTagCount > 0) { logger.error("Number of physical networks without tags are " + networkWithoutTagCount); - throw new CloudRuntimeException("There are more than 1 physical network without tags in the zone= " + - physicalNetwork.getDataCenterId()); + throw new CloudRuntimeException(String.format("There are 
more than 1 physical network without tags in the zone: %s", + _dcDao.findById(physicalNetwork.getDataCenterId()))); } } @@ -5533,7 +5532,7 @@ private PhysicalNetworkServiceProvider addDefaultOvsToPhysicalNetwork(long physi } OvsProviderVO element = _ovsProviderDao.findByNspId(nsp.getId()); if (element != null) { - logger.debug("There is already a Ovs element with service provider id " + nsp.getId()); + logger.debug("There is already a Ovs element with service provider {}", nsp); return nsp; } element = new OvsProviderVO(nsp.getId()); @@ -5799,8 +5798,8 @@ public Network doInTransaction(TransactionStatus status) throws ResourceAllocati } else { logger.debug("Private network already exists: " + privateNetwork); //Do not allow multiple private gateways with same Vlan within a VPC - throw new InvalidParameterValueException("Private network for the vlan: " + uriString + " and cidr " + cidr + " already exists " + "for Vpc " + vpcId + " in zone " - + _entityMgr.findById(DataCenter.class, pNtwk.getDataCenterId()).getName()); + throw new InvalidParameterValueException(String.format("Private network for the vlan: %s and cidr %s already exists for Vpc %s in zone %s", + uriString, cidr, _vpcDao.findById(vpcId), _entityMgr.findById(DataCenter.class, pNtwk.getDataCenterId()).getName())); } if (vpcId != null) { //add entry to private_ip_address table @@ -5930,10 +5929,10 @@ public AcquirePodIpCmdResponse allocatePodIp(Account ipOwner, String zoneId, Str throw new InvalidParameterValueException("Invalid zone Id "); } if (_accountMgr.checkAccessAndSpecifyAuthority(caller, zone.getId()) != zone.getId()) { - throw new InvalidParameterValueException("Caller does not have permission for this Zone" + "(" + zoneId + ")"); + throw new InvalidParameterValueException(String.format("Caller does not have permission for this Zone (%s)", zone)); } if (logger.isDebugEnabled()) { - logger.debug("Associate IP address called by the user " + callerUserId + " account " + ipOwner.getId()); + 
logger.debug("Associate IP address called by the user {} account {}", CallContext.current().getCallingUser(), ipOwner); } return _ipAddrMgr.allocatePodIp(zoneId, podId); @@ -6128,7 +6127,7 @@ private List convertProjectIdsToAccountIds(final Account caller, final Lis } if (!_projectMgr.canAccessProjectAccount(caller, project.getProjectAccountId())) { - throw new InvalidParameterValueException("Account " + caller + " can't access project id=" + projectId); + throw new InvalidParameterValueException(String.format("Account %s can't access project id=%s", caller, project.getUuid())); } accountIds.add(project.getProjectAccountId()); } diff --git a/server/src/main/java/com/cloud/network/SshKeysDistriMonitor.java b/server/src/main/java/com/cloud/network/SshKeysDistriMonitor.java index d922f8d0018e..06ccc1a63f7a 100644 --- a/server/src/main/java/com/cloud/network/SshKeysDistriMonitor.java +++ b/server/src/main/java/com/cloud/network/SshKeysDistriMonitor.java @@ -59,9 +59,14 @@ public synchronized boolean processAnswers(long agentId, long seq, Answer[] resp } @Override - public synchronized boolean processDisconnect(long agentId, Status state) { + public boolean processDisconnect(long agentId, Status state) { + return processDisconnect(agentId, null, null, state); + } + + @Override + public synchronized boolean processDisconnect(long agentId, String uuid, String name, Status state) { if (logger.isTraceEnabled()) - logger.trace("Agent disconnected, agent id: " + agentId + ", state: " + state + ". Will notify waiters"); + logger.trace("Agent disconnected, agent [id: {}, uuid: {}, name: {}, state: {}]. 
Will notify waiters", agentId, uuid, name, state); return true; } @@ -93,7 +98,7 @@ public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) Commands c = new Commands(cmds); _agentMgr.send(host.getId(), c, this); } catch (AgentUnavailableException e) { - logger.debug("Failed to send keys to agent: " + host.getId()); + logger.debug("Failed to send keys to agent: {}", host); } } } diff --git a/server/src/main/java/com/cloud/network/as/AutoScaleManager.java b/server/src/main/java/com/cloud/network/as/AutoScaleManager.java index cf6aab6a7bb5..04d4c8d2d621 100644 --- a/server/src/main/java/com/cloud/network/as/AutoScaleManager.java +++ b/server/src/main/java/com/cloud/network/as/AutoScaleManager.java @@ -16,6 +16,7 @@ // under the License. package com.cloud.network.as; +import com.cloud.user.Account; import org.apache.cloudstack.framework.config.ConfigKey; public interface AutoScaleManager extends AutoScaleService { @@ -40,9 +41,9 @@ public interface AutoScaleManager extends AutoScaleService { void checkAutoScaleUser(Long autoscaleUserId, long accountId); - boolean deleteAutoScaleVmGroupsByAccount(Long accountId); + boolean deleteAutoScaleVmGroupsByAccount(Account account); - void cleanUpAutoScaleResources(Long accountId); + void cleanUpAutoScaleResources(Account account); void doScaleUp(long groupId, Integer numVm); diff --git a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java index 5e7a4a0c4efc..cff889b80f49 100644 --- a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java +++ b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java @@ -590,7 +590,7 @@ public AutoScaleVmProfile createAutoScaleVmProfile(CreateAutoScaleVmProfileCmd c } profileVO = checkValidityAndPersist(profileVO, true); - logger.info("Successfully create AutoScale Vm Profile with Id: " + profileVO.getId()); + logger.info("Successfully create AutoScale Vm Profile: 
{}", profileVO); return profileVO; } @@ -671,7 +671,7 @@ public AutoScaleVmProfile updateAutoScaleVmProfile(UpdateAutoScaleVmProfileCmd c } vmProfile = checkValidityAndPersist(vmProfile, false); - logger.info("Updated Auto Scale Vm Profile id:" + vmProfile.getId()); + logger.info("Updated Auto Scale Vm Profile:{}", vmProfile); return vmProfile; } @@ -680,14 +680,14 @@ public AutoScaleVmProfile updateAutoScaleVmProfile(UpdateAutoScaleVmProfileCmd c @ActionEvent(eventType = EventTypes.EVENT_AUTOSCALEVMPROFILE_DELETE, eventDescription = "deleting autoscale vm profile") public boolean deleteAutoScaleVmProfile(long id) { /* Check if entity is in database */ - getEntityInDatabase(CallContext.current().getCallingAccount(), "AutoScale Vm Profile", id, autoScaleVmProfileDao); + AutoScaleVmProfileVO vmProfile = getEntityInDatabase(CallContext.current().getCallingAccount(), "AutoScale Vm Profile", id, autoScaleVmProfileDao); if (autoScaleVmGroupDao.isProfileInUse(id)) { throw new InvalidParameterValueException("Cannot delete AutoScale Vm Profile when it is in use by one more vm groups"); } boolean success = autoScaleVmProfileDao.remove(id); if (success) { - logger.info("Successfully deleted AutoScale Vm Profile with Id: " + id); + logger.info("Successfully deleted AutoScale Vm Profile: {}", vmProfile); } return success; } @@ -821,7 +821,7 @@ public AutoScalePolicy createAutoScalePolicy(CreateAutoScalePolicyCmd cmd) { AutoScalePolicyVO policyVO = new AutoScalePolicyVO(cmd.getName(), cmd.getDomainId(), cmd.getAccountId(), duration, quietTime, null, scaleAction); policyVO = checkValidityAndPersist(policyVO, cmd.getConditionIds()); - logger.info("Successfully created AutoScale Policy with Id: " + policyVO.getId()); + logger.info("Successfully created AutoScale Policy: {}", policyVO); return policyVO; } @@ -830,7 +830,7 @@ public AutoScalePolicy createAutoScalePolicy(CreateAutoScalePolicyCmd cmd) { @ActionEvent(eventType = EventTypes.EVENT_AUTOSCALEPOLICY_DELETE, eventDescription 
= "deleting autoscale policy") public boolean deleteAutoScalePolicy(final long id) { /* Check if entity is in database */ - getEntityInDatabase(CallContext.current().getCallingAccount(), "AutoScale Policy", id, autoScalePolicyDao); + AutoScalePolicyVO policy = getEntityInDatabase(CallContext.current().getCallingAccount(), "AutoScale Policy", id, autoScalePolicyDao); if (autoScaleVmGroupPolicyMapDao.isAutoScalePolicyInUse(id)) { throw new InvalidParameterValueException("Cannot delete AutoScale Policy when it is in use by one or more AutoScale Vm Groups"); @@ -850,7 +850,7 @@ public Boolean doInTransaction(TransactionStatus status) { logger.warn("Failed to remove AutoScale Policy Condition mappings"); return false; } - logger.info("Successfully deleted autoscale policy id : " + id); + logger.info("Successfully deleted autoscale policy: {}", policy); return success; } @@ -1006,7 +1006,7 @@ public AutoScalePolicy updateAutoScalePolicy(UpdateAutoScalePolicyCmd cmd) { } policy = checkValidityAndPersist(policy, conditionIds); - logger.info("Successfully updated Auto Scale Policy id:" + policyId); + logger.info("Successfully updated Auto Scale Policy: {}", policy); if (CollectionUtils.isNotEmpty(conditionIds)) { markStatisticsAsInactive(null, policyId); @@ -1049,7 +1049,7 @@ public AutoScaleVmGroup createAutoScaleVmGroup(CreateAutoScaleVmGroupCmd cmd) { } vmGroupVO = checkValidityAndPersist(vmGroupVO, cmd.getScaleUpPolicyIds(), cmd.getScaleDownPolicyIds()); - logger.info("Successfully created Autoscale Vm Group with Id: " + vmGroupVO.getId()); + logger.info("Successfully created Autoscale Vm Group: {}", vmGroupVO); createInactiveDummyRecord(vmGroupVO.getId()); scheduleMonitorTask(vmGroupVO.getId()); @@ -1097,12 +1097,12 @@ public boolean deleteAutoScaleVmGroup(final long id, final Boolean cleanup) { } if (!autoScaleVmGroupVO.getState().equals(AutoScaleVmGroup.State.DISABLED) && !Boolean.TRUE.equals(cleanup)) { - throw new 
InvalidParameterValueException(String.format("Cannot delete autoscale vm group id : %d because it is in %s state. Please disable it or pass cleanup=true flag which will destroy all VMs.", id, autoScaleVmGroupVO.getState())); + throw new InvalidParameterValueException(String.format("Cannot delete autoscale vm group : %s because it is in %s state. Please disable it or pass cleanup=true flag which will destroy all VMs.", autoScaleVmGroupVO, autoScaleVmGroupVO.getState())); } Integer currentVM = autoScaleVmGroupVmMapDao.countByGroup(id); if (currentVM > 0 && !Boolean.TRUE.equals(cleanup)) { - throw new InvalidParameterValueException(String.format("Cannot delete autoscale vm group id : %d because there are %d VMs. Please remove the VMs or pass cleanup=true flag which will destroy all VMs.", id, currentVM)); + throw new InvalidParameterValueException(String.format("Cannot delete autoscale vm group : %s because there are %d VMs. Please remove the VMs or pass cleanup=true flag which will destroy all VMs.", autoScaleVmGroupVO, currentVM)); } AutoScaleVmGroup.State bakupState = autoScaleVmGroupVO.getState(); @@ -1125,7 +1125,7 @@ public boolean deleteAutoScaleVmGroup(final long id, final Boolean cleanup) { autoScaleVmGroupDao.persist(autoScaleVmGroupVO); } finally { if (!success) { - logger.warn("Could not delete AutoScale Vm Group id : " + id); + logger.warn("Could not delete AutoScale Vm Group : {}", autoScaleVmGroupVO); return false; } } @@ -1163,7 +1163,7 @@ public Boolean doInTransaction(TransactionStatus status) { return false; } - logger.info("Successfully deleted autoscale vm group id : " + id); + logger.info("Successfully deleted autoscale vm group: {}", autoScaleVmGroupVO); return success; // Successfull } }); @@ -1368,7 +1368,7 @@ public AutoScaleVmGroup updateAutoScaleVmGroup(UpdateAutoScaleVmGroupCmd cmd) { vmGroupVO = checkValidityAndPersist(vmGroupVO, scaleUpPolicyIds, scaleDownPolicyIds); if (vmGroupVO != null) { - logger.debug("Updated Auto Scale VmGroup 
id:" + vmGroupId); + logger.debug("Updated Auto Scale VmGroup: {}", vmGroupVO); if ((interval != null && interval != currentInterval) || CollectionUtils.isNotEmpty(scaleUpPolicyIds) || CollectionUtils.isNotEmpty(scaleDownPolicyIds)) { markStatisticsAsInactive(vmGroupId, null); @@ -1404,10 +1404,10 @@ public AutoScaleVmGroup enableAutoScaleVmGroup(Long id) { autoScaleVmGroupDao.persist(vmGroup); } finally { if (!success) { - logger.warn("Failed to enable AutoScale Vm Group id : " + id); + logger.warn("Failed to enable AutoScale Vm Group: {}", vmGroup); return null; } - logger.info("Successfully enabled AutoScale Vm Group with Id:" + id); + logger.info("Successfully enabled AutoScale Vm Group: {}", vmGroup); createInactiveDummyRecord(vmGroup.getId()); } return vmGroup; @@ -1439,10 +1439,10 @@ public AutoScaleVmGroup disableAutoScaleVmGroup(Long id) { autoScaleVmGroupDao.persist(vmGroup); } finally { if (!success) { - logger.warn("Failed to disable AutoScale Vm Group id : " + id); + logger.warn("Failed to disable AutoScale Vm Group: {}", vmGroup); return null; } - logger.info("Successfully disabled AutoScale Vm Group with Id:" + id); + logger.info("Successfully disabled AutoScale Vm Group: {}", vmGroup); } return vmGroup; } @@ -1505,7 +1505,7 @@ public Condition createCondition(CreateConditionCmd cmd) { ConditionVO condition = null; condition = conditionDao.persist(new ConditionVO(cid, threshold, owner.getAccountId(), owner.getDomainId(), op)); - logger.info("Successfully created condition with Id: " + condition.getId()); + logger.info("Successfully created condition: {}", condition); CallContext.current().setEventDetails(" Id: " + condition.getId()); return condition; @@ -1581,13 +1581,13 @@ public boolean deleteCounter(long counterId) throws ResourceInUseException { ConditionVO condition = conditionDao.findByCounterId(counterId); if (condition != null) { - logger.info("Cannot delete counter " + counter.getName() + " as it is being used in a condition."); + 
logger.info("Cannot delete counter {} as it is being used in a condition.", counter); throw new ResourceInUseException("Counter is in use."); } boolean success = counterDao.remove(counterId); if (success) { - logger.info("Successfully deleted counter with Id: " + counterId); + logger.info("Successfully deleted counter: {}", counter); } return success; @@ -1604,12 +1604,12 @@ public boolean deleteCondition(long conditionId) throws ResourceInUseException { // Verify if condition is used in any autoscale policy if (autoScalePolicyConditionMapDao.isConditionInUse(conditionId)) { - logger.info("Cannot delete condition " + conditionId + " as it is being used in a condition."); + logger.info("Cannot delete condition {} as it is being used in a condition.", condition); throw new ResourceInUseException("Cannot delete Condition when it is in use by one or more AutoScale Policies."); } boolean success = conditionDao.remove(conditionId); if (success) { - logger.info("Successfully deleted condition " + condition.getId()); + logger.info("Successfully deleted condition {}", condition); } return success; } @@ -1656,7 +1656,7 @@ public Condition updateCondition(UpdateConditionCmd cmd) throws ResourceInUseExc sc2.setJoinParameters("policySearch", "policyId", policyIds.toArray((new Object[policyIds.size()]))); List groups = autoScaleVmGroupDao.search(sc2, null); if (CollectionUtils.isNotEmpty(groups)) { - String msg = String.format("Cannot update condition %d as it is being used in %d vm groups NOT in Disabled state.", conditionId, groups.size()); + String msg = String.format("Cannot update condition %s as it is being used in %d vm groups NOT in Disabled state.", condition, groups.size()); logger.info(msg); throw new ResourceInUseException(msg); } @@ -1666,7 +1666,7 @@ public Condition updateCondition(UpdateConditionCmd cmd) throws ResourceInUseExc condition.setThreshold(threshold); boolean success = conditionDao.update(conditionId, condition); if (success) { - 
logger.info("Successfully updated condition " + condition.getId()); + logger.info("Successfully updated condition {}", condition); for (Long policyId : policyIds) { markStatisticsAsInactive(null, policyId); @@ -1676,16 +1676,16 @@ public Condition updateCondition(UpdateConditionCmd cmd) throws ResourceInUseExc } @Override - public boolean deleteAutoScaleVmGroupsByAccount(Long accountId) { + public boolean deleteAutoScaleVmGroupsByAccount(Account account) { boolean success = true; - List groups = autoScaleVmGroupDao.listByAccount(accountId); + List groups = autoScaleVmGroupDao.listByAccount(account.getId()); for (AutoScaleVmGroupVO group : groups) { - logger.debug("Deleting AutoScale Vm Group " + group + " for account Id: " + accountId); + logger.debug("Deleting AutoScale Vm Group {} for account: {}", group, account); try { deleteAutoScaleVmGroup(group.getId(), true); - logger.debug("AutoScale Vm Group " + group + " has been successfully deleted for account Id: " + accountId); + logger.debug("AutoScale Vm Group {} has been successfully deleted for account: {}", group, account); } catch (Exception e) { - logger.warn("Failed to delete AutoScale Vm Group " + group + " for account Id: " + accountId + " due to: ", e); + logger.warn("Failed to delete AutoScale Vm Group {} for account: {} due to: ", group, account, e); success = false; } } @@ -1693,20 +1693,20 @@ public boolean deleteAutoScaleVmGroupsByAccount(Long accountId) { } @Override - public void cleanUpAutoScaleResources(Long accountId) { + public void cleanUpAutoScaleResources(Account account) { // cleans Autoscale VmProfiles, AutoScale Policies and Conditions belonging to an account int count = 0; - count = autoScaleVmProfileDao.removeByAccountId(accountId); + count = autoScaleVmProfileDao.removeByAccountId(account.getId()); if (count > 0) { - logger.debug("Deleted " + count + " AutoScale Vm Profile for account Id: " + accountId); + logger.debug("Deleted {} AutoScale Vm Profile for account: {}", count, account); 
} - count = autoScalePolicyDao.removeByAccountId(accountId); + count = autoScalePolicyDao.removeByAccountId(account.getId()); if (count > 0) { - logger.debug("Deleted " + count + " AutoScale Policies for account Id: " + accountId); + logger.debug("Deleted {} AutoScale Policies for account: {}", count, account); } - count = conditionDao.removeByAccountId(accountId); + count = conditionDao.removeByAccountId(account.getId()); if (count > 0) { - logger.debug("Deleted " + count + " Conditions for account Id: " + accountId); + logger.debug("Deleted {} Conditions for account: {}", count, account); } } @@ -1743,12 +1743,12 @@ protected Map getDeployParams (String otherDeployParams) { return deployParams; } - protected long createNewVM(AutoScaleVmGroupVO asGroup) { + protected UserVm createNewVM(AutoScaleVmGroupVO asGroup) { AutoScaleVmProfileVO profileVo = autoScaleVmProfileDao.findById(asGroup.getProfileId()); long templateId = profileVo.getTemplateId(); long serviceOfferingId = profileVo.getServiceOfferingId(); if (templateId == -1) { - return -1; + return null; } // create new VM into DB try { @@ -1778,7 +1778,7 @@ protected long createNewVM(AutoScaleVmGroupVO asGroup) { if (!zone.isLocalStorageEnabled()) { if (diskOffering.isUseLocalStorage()) { - throw new InvalidParameterValueException("Zone is not configured to use local storage but disk offering " + diskOffering.getName() + " associated to the service offering " + serviceOffering.getName() + " uses it"); + throw new InvalidParameterValueException(String.format("Zone is not configured to use local storage but disk offering %s associated to the service offering %s uses it", diskOffering, serviceOffering)); } } @@ -1828,11 +1828,7 @@ protected long createNewVM(AutoScaleVmGroupVO asGroup) { } } - if (vm != null) { - return vm.getId(); - } else { - return -1; - } + return vm; } catch (InsufficientCapacityException ex) { logger.info(ex); logger.trace(ex.getMessage(), ex); @@ -1974,10 +1970,10 @@ public void 
checkAutoScaleVmGroupName(String groupName) { } } - private boolean startNewVM(long vmId) { + private UserVmVO startNewVM(long vmId) { try { CallContext.current().setEventDetails("Vm Id: " + vmId); - userVmMgr.startVirtualMachine(vmId, null, new HashMap<>(), null); + return userVmMgr.startVirtualMachine(vmId, null, new HashMap<>(), null).first(); } catch (final ResourceUnavailableException ex) { logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); @@ -1998,7 +1994,6 @@ private boolean startNewVM(long vmId) { logger.info(message.toString(), ex); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, message.toString()); } - return true; } private boolean assignLBruleToNewVm(long vmId, AutoScaleVmGroupVO asGroup) { @@ -2056,7 +2051,7 @@ public void doScaleUp(long groupId, Integer numVm) { AutoScaleVmGroup.State oldState = asGroup.getState(); AutoScaleVmGroup.State newState = AutoScaleVmGroup.State.SCALING; if (!autoScaleVmGroupDao.updateState(groupId, oldState, newState)) { - logger.error(String.format("Can not update vmgroup state from %s to %s, groupId: %s", oldState, newState, groupId)); + logger.error("Can not update vmgroup state from {} to {}, groupId: {}", oldState, newState, asGroup); return; } try { @@ -2064,23 +2059,22 @@ public void doScaleUp(long groupId, Integer numVm) { ActionEventUtils.onStartedActionEvent(User.UID_SYSTEM, asGroup.getAccountId(), EventTypes.EVENT_AUTOSCALEVMGROUP_SCALEUP, "Scaling Up AutoScale VM group " + groupId, groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), true, 0); - long vmId = createNewVM(asGroup); - if (vmId == -1) { - logger.error("Can not deploy new VM for scaling up in the group " - + asGroup.getId() + ". Waiting for next round"); + UserVm vm = createNewVM(asGroup); + if (vm == null) { + logger.error("Can not deploy new VM for scaling up in the group {}. 
Waiting for next round", asGroup); break; } // persist to DB - AutoScaleVmGroupVmMapVO groupVmMapVO = new AutoScaleVmGroupVmMapVO(asGroup.getId(), vmId); + AutoScaleVmGroupVmMapVO groupVmMapVO = new AutoScaleVmGroupVmMapVO(asGroup.getId(), vm.getId()); autoScaleVmGroupVmMapDao.persist(groupVmMapVO); // Add an Inactive-dummy record to statistics table createInactiveDummyRecord(asGroup.getId()); try { - startNewVM(vmId); + startNewVM(vm.getId()); createInactiveDummyRecord(asGroup.getId()); - if (assignLBruleToNewVm(vmId, asGroup)) { + if (assignLBruleToNewVm(vm.getId(), asGroup)) { // update last_quietTime List groupPolicyVOs = autoScaleVmGroupPolicyMapDao .listByVmGroupId(groupId); @@ -2094,25 +2088,24 @@ public void doScaleUp(long groupId, Integer numVm) { } } ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, asGroup.getAccountId(), EventVO.LEVEL_INFO, EventTypes.EVENT_AUTOSCALEVMGROUP_SCALEUP, - String.format("Started and assigned LB rule for VM %d in AutoScale VM group %d", vmId, groupId), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); + String.format("Started and assigned LB rule for VM %s in AutoScale VM group %s", vm, asGroup), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); } else { logger.error("Can not assign LB rule for this new VM"); ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, asGroup.getAccountId(), EventVO.LEVEL_ERROR, EventTypes.EVENT_AUTOSCALEVMGROUP_SCALEUP, - String.format("Failed to assign LB rule for VM %d in AutoScale VM group %d", vmId, groupId), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); + String.format("Failed to assign LB rule for VM %s in AutoScale VM group %s", vm, asGroup), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); break; } } catch (ServerApiException e) { - logger.error("Can not deploy new VM for scaling up in the group " - + asGroup.getId() + ". Waiting for next round"); + logger.error("Can not deploy new VM for scaling up in the group {}. 
Waiting for next round", asGroup); ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, asGroup.getAccountId(), EventVO.LEVEL_ERROR, EventTypes.EVENT_AUTOSCALEVMGROUP_SCALEUP, - String.format("Failed to start VM %d in AutoScale VM group %d", vmId, groupId), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); - destroyVm(vmId); + String.format("Failed to start VM %s in AutoScale VM group %s", vm, asGroup), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); + destroyVm(vm.getId()); break; } } } finally { if (!autoScaleVmGroupDao.updateState(groupId, newState, oldState)) { - logger.error(String.format("Can not update vmgroup state from %s back to %s, groupId: %s", newState, oldState, groupId)); + logger.error("Can not update vmgroup state from {} back to {}, group: {}", newState, oldState, asGroup); } } } @@ -2130,20 +2123,20 @@ public void doScaleDown(final long groupId) { AutoScaleVmGroup.State oldState = asGroup.getState(); AutoScaleVmGroup.State newState = AutoScaleVmGroup.State.SCALING; if (!autoScaleVmGroupDao.updateState(groupId, oldState, newState)) { - logger.error(String.format("Can not update vmgroup state from %s to %s, groupId: %s", oldState, newState, groupId)); + logger.error("Can not update vmgroup state from {} to {}, groupId: {}", oldState, newState, asGroup); return; } ActionEventUtils.onStartedActionEvent(User.UID_SYSTEM, asGroup.getAccountId(), EventTypes.EVENT_AUTOSCALEVMGROUP_SCALEDOWN, - "Scaling down AutoScale VM group " + groupId, groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), + String.format("Scaling down AutoScale VM group %s", asGroup), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), true, 0); try { long vmId = -1; try { vmId = removeLBrule(asGroup); } catch (Exception ex) { - logger.info("Got exception when remove LB rule for a VM in AutoScale VM group %d: " + groupId, ex); + logger.info("Got exception when remove LB rule for a VM in AutoScale VM group: {}", asGroup, ex); 
ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, asGroup.getAccountId(), EventVO.LEVEL_ERROR, EventTypes.EVENT_AUTOSCALEVMGROUP_SCALEDOWN, - String.format("Failed to remove LB rule for a VM in AutoScale VM group %d", groupId), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); + String.format("Failed to remove LB rule for a VM in AutoScale VM group %s", asGroup), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); throw ex; } if (vmId != -1) { @@ -2178,19 +2171,19 @@ public void doScaleDown(final long groupId) { } if (destroyVm(vmId)) { ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, asGroup.getAccountId(), EventVO.LEVEL_INFO, EventTypes.EVENT_AUTOSCALEVMGROUP_SCALEDOWN, - String.format("Destroyed VM %d in AutoScale VM group %d", vmId, groupId), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); + String.format("Destroyed VM %d in AutoScale VM group %s", vmId, asGroup), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); } else { ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, asGroup.getAccountId(), EventVO.LEVEL_ERROR, EventTypes.EVENT_AUTOSCALEVMGROUP_SCALEDOWN, - String.format("Failed to destroy VM %d in AutoScale VM group %d", vmId, groupId), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); + String.format("Failed to destroy VM %d in AutoScale VM group %s", vmId, asGroup), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); } } else { logger.error("Can not remove LB rule for the VM being destroyed. 
Do nothing more."); ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, asGroup.getAccountId(), EventVO.LEVEL_ERROR, EventTypes.EVENT_AUTOSCALEVMGROUP_SCALEDOWN, - String.format("Failed to remove LB rule for a VM in AutoScale VM group %d", groupId), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); + String.format("Failed to remove LB rule for a VM in AutoScale VM group %s", asGroup), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); } } finally { if (!autoScaleVmGroupDao.updateState(groupId, newState, oldState)) { - logger.error(String.format("Can not update vmgroup state from %s back to %s, groupId: %s", newState, oldState, groupId)); + logger.error("Can not update vmgroup state from {} back to {}, groupId: {}", newState, oldState, asGroup); } } } @@ -2328,7 +2321,7 @@ protected Map> getPolicyCounters(AutoScaleVmGroupTO groupT } protected AutoScalePolicy.Action getAutoscaleAction(Map countersMap, Map countersNumberMap, AutoScaleVmGroupTO groupTO) { - logger.debug("[AutoScale] Getting autoscale action for group : " + groupTO.getId()); + logger.debug("[AutoScale] Getting autoscale action for group [id={}, uuid={}]", groupTO.getId(), groupTO.getUuid()); Network.Provider provider = getLoadBalancerServiceProvider(groupTO.getLoadBalancerId()); @@ -2367,10 +2360,12 @@ protected AutoScalePolicy.Action checkConditionsForPolicy(Map co } Double sum = countersMap.get(key); Integer number = countersNumberMap.get(key); - logger.debug(String.format("Checking policyId = %d, conditionId = %d, counter = \"%s\", sum = %f, number = %s", policyTO.getId(), conditionTO.getId(), counter.getName(), sum, number)); + logger.debug("Checking policy [id: {}, uuid: {}], condition [id: {}, uuid: {}], counter = \"{}\", sum = {}, number = {}", + policyTO.getId(), policyTO.getUuid(), conditionTO.getId(), conditionTO.getUuid(), counter.getName(), sum, number); if (number == null || number == 0) { bValid = false; - logger.debug(String.format("Skipping policyId = 
%d, conditionId = %d, counter = \"%s\" because the number is %s", policyTO.getId(), conditionTO.getId(), counter.getName(), number)); + logger.debug("Skipping policy [id: {}, uuid: {}], condition [id: {}, uuid: {}], counter = \"{}\" because the number is {}", + policyTO.getId(), policyTO.getUuid(), conditionTO.getId(), conditionTO.getUuid(), counter.getName(), number); break; } Double avg = sum / number; @@ -2381,9 +2376,9 @@ protected AutoScalePolicy.Action checkConditionsForPolicy(Map co || ((op == com.cloud.network.as.Condition.Operator.LE) && (avg.doubleValue() <= thresholdPercent.doubleValue())) || ((op == com.cloud.network.as.Condition.Operator.LT) && (avg.doubleValue() < thresholdPercent.doubleValue())); - logger.debug(String.format("Check result on policyId = %d, conditionId = %d, counter = %s is : %s" + - " (actual result = %f, operator = %s, threshold = %f)", - policyTO.getId(), conditionTO.getId(), counter.getSource(), bConditionCheck, avg, op, thresholdPercent)); + logger.debug("Check result on policy [id: {}, uuid: {}], condition [id: {}, uuid: {}], counter = {} is : {}" + + " (actual result = {}, operator = {}, threshold = {})", + policyTO.getId(), policyTO.getUuid(), conditionTO.getId(), conditionTO.getUuid(), counter.getSource(), bConditionCheck, avg, op, thresholdPercent); if (!bConditionCheck) { bValid = false; @@ -2391,7 +2386,7 @@ protected AutoScalePolicy.Action checkConditionsForPolicy(Map co } } AutoScalePolicy.Action action = bValid ? 
policyTO.getAction() : null; - logger.debug(String.format("Check result on policyId = %d is %s", policyTO.getId(), action)); + logger.debug("Check result on policy [id: {}, uuid: {}] is {}", policyTO.getId(), policyTO.getUuid(), action); return action; } @@ -2423,7 +2418,7 @@ protected Network getNetwork(Long loadBalancerId) { } Network network = networkDao.findById(loadBalancer.getNetworkId()); if (network == null) { - throw new CloudRuntimeException(String.format("Unable to find network with id: %s ", loadBalancer.getNetworkId())); + throw new CloudRuntimeException(String.format("Unable to find network with id: %s for load balancer: %s", loadBalancer.getNetworkId(), loadBalancer)); } return network; } @@ -2435,7 +2430,7 @@ protected Pair getPublicIpAndPort(Long loadBalancerId) { } IPAddressVO ipAddress = ipAddressDao.findById(loadBalancer.getSourceIpAddressId()); if (ipAddress == null) { - throw new CloudRuntimeException(String.format("Unable to find IP Address with id: %s ", loadBalancer.getSourceIpAddressId())); + throw new CloudRuntimeException(String.format("Unable to find IP Address with id: %s for load balancer: %s", loadBalancer.getSourceIpAddressId(), loadBalancer)); } return new Pair<>(ipAddress.getAddress().addr(), loadBalancer.getSourcePortStart()); } @@ -2528,7 +2523,7 @@ protected void checkNetScalerAsGroup(AutoScaleVmGroupVO asGroup) { AutoScalePolicy.Action scaleAction = getAutoscaleAction(countersMap, countersNumberMap, groupTO); if (scaleAction != null) { - logger.debug("[AutoScale] Doing scale action: " + scaleAction + " for group " + asGroup.getId()); + logger.debug("[AutoScale] Doing scale action: {} for group {}", scaleAction, asGroup); if (AutoScalePolicy.Action.SCALEUP.equals(scaleAction)) { doScaleUp(asGroup.getId(), 1); } else { @@ -2574,13 +2569,14 @@ protected void processPerformanceMonitorAnswer(Map countersMap, Long counterId = Long.parseLong(counterVm[1]); - Long conditionId = Long.parseLong(params.get("con" + counterVm[0])); + 
ConditionTO condition = new ConditionTO(Long.parseLong(params.get("con" + counterVm[0])), null, 0L, null, null); - Long policyId = 0L; // For NetScaler, the policyId is not returned in PerformanceMonitorAnswer + // For NetScaler, the policyId is not returned in PerformanceMonitorAnswer + AutoScalePolicyTO policy = new AutoScalePolicyTO(0L, null, 0, 0, null, null, null, false); Double coVal = Double.parseDouble(counterVals[1]); - updateCountersMapWithInstantData(countersMap, countersNumberMap, groupTO, counterId, conditionId, policyId, coVal, AutoScaleValueType.INSTANT_VM); + updateCountersMapWithInstantData(countersMap, countersNumberMap, groupTO, counterId, condition, policy, coVal, AutoScaleValueType.INSTANT_VM); } catch (Exception e) { logger.error("Cannot process PerformanceMonitorAnswer due to Exception: ", e); @@ -2590,9 +2586,9 @@ protected void processPerformanceMonitorAnswer(Map countersMap, } protected void updateCountersMapWithInstantData(Map countersMap, Map countersNumberMap, AutoScaleVmGroupTO groupTO, - Long counterId, Long conditionId, Long policyId, Double coVal, AutoScaleValueType valueType) { + Long counterId, ConditionTO condition, AutoScalePolicyTO policy, Double coVal, AutoScaleValueType valueType) { // Summary of all counter by counterId key - String key = generateKeyFromPolicyAndConditionAndCounter(policyId, conditionId, counterId); + String key = generateKeyFromPolicyAndConditionAndCounter(policy.getId(), condition.getId(), counterId); CounterVO counter = counterDao.findById(counterId); if (counter == null) { @@ -2614,7 +2610,9 @@ protected void updateCountersMapWithInstantData(Map countersMap, if (AutoScaleValueType.INSTANT_VM_GROUP.equals(valueType)) { Integer currentVM = autoScaleVmGroupVmMapDao.countAvailableVmsByGroup(groupTO.getId()); if (currentVM == 0) { - logger.debug(String.format("Skipping updating countersMap for group %s and policy %s and counter %s due to no VMs", groupTO.getId(), policyId, counterId)); + 
logger.debug("Skipping updating countersMap for group [id={}, uuid={}] " + + "and policy [id={}, uuid={}] and counter [id={}, uuid={}] due to no VMs", + groupTO.getId(), groupTO.getUuid(), policy.getId(), policy.getUuid(), counter.getId(), counter.getUuid()); return; } coVal = coVal / currentVM; @@ -2682,7 +2680,7 @@ protected void checkVirtualRouterAsGroup(AutoScaleVmGroupVO asGroup) { // get scale action AutoScalePolicy.Action scaleAction = getAutoscaleAction(countersMap, countersNumberMap, groupTO); if (scaleAction != null) { - logger.debug("[AutoScale] Doing scale action: " + scaleAction + " for group " + asGroup.getId()); + logger.debug("[AutoScale] Doing scale action: {} for group {}", scaleAction, asGroup); if (AutoScalePolicy.Action.SCALEUP.equals(scaleAction)) { doScaleUp(asGroup.getId(), 1); } else { @@ -2722,7 +2720,7 @@ protected void getVmStatsFromHosts(AutoScaleVmGroupTO groupTO) { logger.warn("Got empty result for virtual machine statistics from host: " + host); } } catch (Exception e) { - logger.debug("Failed to get VM stats from host : " + host.getName()); + logger.debug("Failed to get VM stats from host : {}", host); } return vmStatsById; } @@ -2779,7 +2777,7 @@ protected void getNetworkStatsFromVirtualRouter(AutoScaleVmGroupTO groupTO) { command.setWait(30); GetAutoScaleMetricsAnswer answer = (GetAutoScaleMetricsAnswer) agentMgr.easySend(router.getHostId(), command); if (answer == null || !answer.getResult()) { - logger.error("Failed to get autoscale metrics from virtual router " + router.getName()); + logger.error("Failed to get autoscale metrics from virtual router {}", router); processGetAutoScaleMetricsAnswer(groupTO, new ArrayList<>(), router.getId()); } else { processGetAutoScaleMetricsAnswer(groupTO, answer.getValues(), router.getId()); @@ -2838,24 +2836,26 @@ protected void processGetAutoScaleMetricsAnswer(AutoScaleVmGroupTO groupTO, List } protected boolean updateCountersMap(AutoScaleVmGroupTO groupTO, Map countersMap, Map 
countersNumberMap) { - logger.debug("Updating countersMap for as group: " + groupTO.getId()); + logger.debug("Updating countersMap for as group [id={}, uuid={}]", groupTO.getId(), groupTO.getUuid()); for (AutoScalePolicyTO policyTO : groupTO.getPolicies()) { Date afterDate = new Date(System.currentTimeMillis() - ((long)policyTO.getDuration() << 10)); List dummyStats = asGroupStatisticsDao.listDummyRecordsByVmGroup(groupTO.getId(), afterDate); if (CollectionUtils.isNotEmpty(dummyStats)) { - logger.error(String.format("Failed to update counters map as there are %d dummy statistics in as group %d", dummyStats.size(), groupTO.getId())); + logger.error("Failed to update counters map as there are {} dummy statistics in as group {}", dummyStats.size(), groupTO.getId()); return false; } List inactiveStats = asGroupStatisticsDao.listInactiveByVmGroupAndPolicy(groupTO.getId(), policyTO.getId(), afterDate); if (CollectionUtils.isNotEmpty(inactiveStats)) { - logger.error(String.format("Failed to update counters map as there are %d Inactive statistics in as group %d and policy %s", inactiveStats.size(), groupTO.getId(), policyTO.getId())); + logger.error("Failed to update counters map as there are {} Inactive " + + "statistics in as group [id={}, uuid={}] and policy [id={}, uuid={}]", + inactiveStats.size(), groupTO.getId(), groupTO.getUuid(), policyTO.getId(), policyTO.getUuid()); continue; } for (ConditionTO conditionTO : policyTO.getConditions()) { updateCountersMapPerCondition(groupTO, policyTO, conditionTO, afterDate, countersMap, countersNumberMap); } } - logger.debug("DONE Updating countersMap for as group: " + groupTO.getId()); + logger.debug("DONE Updating countersMap for as group [id={}, uuid={}]", groupTO.getId(), groupTO.getUuid()); return true; } @@ -2865,15 +2865,17 @@ private void updateCountersMapPerCondition(AutoScaleVmGroupTO groupTO, AutoScale CounterTO counter = conditionTO.getCounter(); List stats = 
asGroupStatisticsDao.listByVmGroupAndPolicyAndCounter(groupTO.getId(), policyTO.getId(), counter.getId(), afterDate); if (CollectionUtils.isEmpty(stats)) { - logger.debug(String.format("Skipping updating countersMap for group %s and policy %s and counter %s due to no stats", groupTO.getId(), policyTO.getId(), counter.getId())); + logger.debug("Skipping updating countersMap for group {} and policy {} and " + + "counter {} due to no stats", groupTO.getId(), groupTO.getUuid(), policyTO.getId()); return; } - logger.debug(String.format("Updating countersMap with %d stats for group %s and policy %s and counter %s", stats.size(), groupTO.getId(), policyTO.getId(), counter.getId())); + logger.debug("Updating countersMap with {} stats for group {} and policy {} and " + + "counter {}", stats.size(), groupTO.getId(), groupTO.getUuid(), policyTO.getId()); Map> aggregatedRecords = new HashMap<>(); List incorrectRecords = new ArrayList<>(); for (AutoScaleVmGroupStatisticsVO stat : stats) { if (Arrays.asList(AutoScaleValueType.INSTANT_VM, AutoScaleValueType.INSTANT_VM_GROUP).contains(stat.getValueType())) { - updateCountersMapWithInstantData(countersMap, countersNumberMap, groupTO, counter.getId(), conditionId, policyTO.getId(), stat.getRawValue(), stat.getValueType()); + updateCountersMapWithInstantData(countersMap, countersNumberMap, groupTO, counter.getId(), conditionTO, policyTO, stat.getRawValue(), stat.getValueType()); } else if (Arrays.asList(AutoScaleValueType.AGGREGATED_VM, AutoScaleValueType.AGGREGATED_VM_GROUP).contains(stat.getValueType())) { String key = stat.getCounterId() + "-" + stat.getResourceId(); if (incorrectRecords.contains(key)) { @@ -2899,12 +2901,12 @@ private void updateCountersMapPerCondition(AutoScaleVmGroupTO groupTO, AutoScale } } - updateCountersMapByAggregatedRecords(countersMap, countersNumberMap, aggregatedRecords, conditionId, policyTO.getId(), groupTO.getId()); + updateCountersMapByAggregatedRecords(countersMap, countersNumberMap, 
aggregatedRecords, conditionTO, policyTO, groupTO); } public void updateCountersMapByAggregatedRecords(Map countersMap, Map countersNumberMap, Map> aggregatedRecords, - Long conditionId, Long policyId, Long groupId) { + ConditionTO condition, AutoScalePolicyTO policy, AutoScaleVmGroupTO group) { if (MapUtils.isNotEmpty(aggregatedRecords)) { logger.debug("Processing aggregated data"); for (Map.Entry> aggregatedRecord : aggregatedRecords.entrySet()) { @@ -2912,21 +2914,24 @@ public void updateCountersMapByAggregatedRecords(Map countersMap Long counterId = Long.valueOf(recordKey.split("-")[0]); List records = aggregatedRecord.getValue(); if (records.size() <= 1) { - logger.info(String.format("Ignoring aggregated records, conditionId = %s, counterId = %s", conditionId, counterId)); + logger.info(String.format("Ignoring aggregated records, condition [id=%d, uuid=%s], counterId = %s", + condition.getId(), condition.getUuid(), counterId)); continue; } AutoScaleVmGroupStatisticsVO firstRecord = records.get(0); AutoScaleVmGroupStatisticsVO lastRecord = records.get(records.size() - 1); Double coVal = (lastRecord.getRawValue() - firstRecord.getRawValue()) * 1000 / (lastRecord.getCreated().getTime() - firstRecord.getCreated().getTime()); if (AutoScaleValueType.AGGREGATED_VM_GROUP.equals(firstRecord.getValueType())) { - Integer currentVM = autoScaleVmGroupVmMapDao.countAvailableVmsByGroup(groupId); + Integer currentVM = autoScaleVmGroupVmMapDao.countAvailableVmsByGroup(group.getId()); if (currentVM == 0) { - logger.debug(String.format("Skipping updating countersMap for group %s and policy %s and counter %s due to no VMs", groupId, policyId, counterId)); + logger.debug("Skipping updating countersMap for group [id={}," + + " uuid={}] and policy [id={}, uuid={}] and counter {} due to no VMs", + group.getId(), group.getUuid(), policy.getId(), policy.getUuid(), counterId); return; } coVal = coVal / currentVM; } - String key = generateKeyFromPolicyAndConditionAndCounter(policyId, 
conditionId, counterId); + String key = generateKeyFromPolicyAndConditionAndCounter(policy.getId(), condition.getId(), counterId); updateCountersMapWithProcessedData(countersMap, countersNumberMap, key, coVal); } } @@ -2943,14 +2948,15 @@ protected void cleanupAsVmGroupStatistics(AutoScaleVmGroupTO groupTO) { Integer duration = policyTO.getDuration(); Integer delaySecs = cleanupDelay >= duration ? cleanupDelay : duration; Date beforeDate = new Date(System.currentTimeMillis() - ((long)delaySecs * 1000)); - logger.debug(String.format("Removing stats for policy %d in as group %d, before %s", policyTO.getId(), groupTO.getId(), beforeDate)); + logger.debug("Removing stats for policy [id={}, uuid={}] in as group [id={}, uuid={}], before {}", + policyTO.getId(), policyTO.getUuid(), groupTO.getId(), groupTO.getUuid(), beforeDate); asGroupStatisticsDao.removeByGroupAndPolicy(groupTO.getId(), policyTO.getId(), beforeDate); if (delaySecs > maxDelaySecs) { maxDelaySecs = delaySecs; } } Date beforeDate = new Date(System.currentTimeMillis() - ((long)maxDelaySecs * 1000)); - logger.debug(String.format("Removing stats for other policies in as group %d, before %s", groupTO.getId(), beforeDate)); + logger.debug(String.format("Removing stats for other policies in as group [id=%d, uuid=%s], before %s", groupTO.getId(), groupTO.getUuid(), beforeDate)); asGroupStatisticsDao.removeByGroupId(groupTO.getId(), beforeDate); } @@ -3034,15 +3040,15 @@ public void removeVmFromVmGroup(Long vmId) { } protected boolean destroyVm(Long vmId) { + UserVmVO vm = userVmDao.findById(vmId); try { - UserVmVO vm = userVmDao.findById(vmId); if (vm != null) { userVmMgr.destroyVm(vmId, true); userVmMgr.expunge(vm); } return true; } catch (Exception ex) { - logger.error("Cannot destroy vm with id: " + vmId + "due to Exception: ", ex); + logger.error("Cannot destroy vm {} with id: {} due to Exception: ", vm, vmId, ex); return false; } } diff --git 
a/server/src/main/java/com/cloud/network/element/ConfigDriveNetworkElement.java b/server/src/main/java/com/cloud/network/element/ConfigDriveNetworkElement.java index 3449f1f5d00e..d3741afdc7ef 100644 --- a/server/src/main/java/com/cloud/network/element/ConfigDriveNetworkElement.java +++ b/server/src/main/java/com/cloud/network/element/ConfigDriveNetworkElement.java @@ -25,6 +25,7 @@ import javax.inject.Inject; +import com.cloud.host.Host; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; @@ -341,14 +342,14 @@ public boolean postStateTransitionEvent(StateMachine2.Transition customUserdataParamMap = getVMCustomUserdataParamMap(profile.getId()); @@ -554,9 +555,9 @@ private boolean createConfigDriveIsoOnHostCache(NicProfile nic, VirtualMachinePr final String isoData = ConfigDriveBuilder.buildConfigDrive(nicProfiles, profile.getVmData(), isoFileName, profile.getConfigDriveLabel(), customUserdataParamMap, supportedServices); final HandleConfigDriveIsoCommand configDriveIsoCommand = new HandleConfigDriveIsoCommand(isoPath, isoData, null, false, true, true); - final HandleConfigDriveIsoAnswer answer = (HandleConfigDriveIsoAnswer) agentManager.easySend(hostId, configDriveIsoCommand); + final HandleConfigDriveIsoAnswer answer = (HandleConfigDriveIsoAnswer) agentManager.easySend(host.getId(), configDriveIsoCommand); if (answer == null) { - throw new CloudRuntimeException("Unable to get an answer to handle config drive creation for vm: " + profile.getInstanceName() + " on host: " + hostId); + throw new CloudRuntimeException(String.format("Unable to get an answer to handle config drive creation for vm: %s on host: %s", profile, host)); } if (!answer.getResult()) { @@ -576,26 +577,26 @@ private boolean deleteConfigDriveIsoOnHostCache(final VirtualMachine vm, final L ConfigDriveNetworkElement.class, 
0L); } - logger.debug("Deleting config drive ISO for vm: " + vm.getInstanceName() + " on host: " + hostId); final String isoPath = ConfigDrive.createConfigDrivePath(vm.getInstanceName()); final HandleConfigDriveIsoCommand configDriveIsoCommand = new HandleConfigDriveIsoCommand(isoPath, null, null, false, true, false); HostVO hostVO = _hostDao.findById(hostId); + logger.debug("Deleting config drive ISO for vm: {} on host: {}({})", vm, hostId, hostVO); if (hostVO == null) { logger.warn(String.format("Host %s appears to be unavailable, skipping deletion of config-drive ISO on host cache", hostId)); return false; } if (!Arrays.asList(Status.Up, Status.Connecting).contains(hostVO.getStatus())) { - logger.warn(String.format("Host status %s is not Up or Connecting, skipping deletion of config-drive ISO on host cache", hostId)); + logger.warn("Host status {} is not Up or Connecting, skipping deletion of config-drive ISO on host cache", hostVO); return false; } final HandleConfigDriveIsoAnswer answer = (HandleConfigDriveIsoAnswer) agentManager.easySend(hostId, configDriveIsoCommand); if (answer == null) { - throw new CloudRuntimeException("Unable to get an answer to handle config drive deletion for vm: " + vm.getInstanceName() + " on host: " + hostId); + throw new CloudRuntimeException(String.format("Unable to get an answer to handle config drive deletion for vm: %s on host: %s", vm, hostVO)); } if (!answer.getResult()) { - logger.error("Failed to remove config drive for instance: " + vm.getInstanceName()); + logger.error("Failed to remove config drive for instance: {}", vm); return false; } return true; @@ -630,7 +631,7 @@ public boolean createConfigDriveIso(NicProfile nic, VirtualMachineProfile profil ConfigDriveNetworkElement.class, 0L); } - logger.debug("Creating config drive ISO for vm: " + profile.getInstanceName()); + logger.debug("Creating config drive ISO for vm: {}", profile); Map customUserdataParamMap = getVMCustomUserdataParamMap(profile.getId()); @@ -698,7 
+699,7 @@ private boolean deleteConfigDriveIso(final VirtualMachine vm) throws ResourceUna Long hostId = (vm.getHostId() != null) ? vm.getHostId() : vm.getLastHostId(); Location location = getConfigDriveLocation(vm.getId()); if (hostId == null) { - logger.info(String.format("The VM was never booted; no config-drive ISO created for VM %s", vm.getName())); + logger.info("The VM was never booted; no config-drive ISO created for VM {}", vm); return true; } if (location == Location.HOST) { @@ -726,14 +727,14 @@ private boolean deleteConfigDriveIso(final VirtualMachine vm) throws ResourceUna ConfigDriveNetworkElement.class, 0L); } - logger.debug("Deleting config drive ISO for vm: " + vm.getInstanceName()); + logger.debug("Deleting config drive ISO for vm: {}", vm); final String isoPath = ConfigDrive.createConfigDrivePath(vm.getInstanceName()); final HandleConfigDriveIsoCommand configDriveIsoCommand = new HandleConfigDriveIsoCommand(isoPath, null, dataStore.getTO(), false, false, false); final HandleConfigDriveIsoAnswer answer = (HandleConfigDriveIsoAnswer) agentManager.easySend(agentId, configDriveIsoCommand); if (!answer.getResult()) { - logger.error("Failed to remove config drive for instance: " + vm.getInstanceName()); + logger.error("Failed to remove config drive for instance: {}", vm); return false; } return true; diff --git a/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java b/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java index a87504cd07a7..8e3573cbec8e 100644 --- a/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java +++ b/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java @@ -281,7 +281,7 @@ public boolean applyFWRules(final Network network, final List routers = getRouters(network); if (routers == null || routers.isEmpty()) { - logger.debug("Virtual router elemnt doesn't need to apply firewall rules on the backend; virtual " + "router doesn't exist in the network " + 
network.getId()); + logger.debug("Virtual router element doesn't need to apply firewall rules on the backend; virtual router doesn't exist in the network {}", network); return true; } @@ -328,7 +328,7 @@ public boolean applyLBRules(final Network network, final List final List routers = getRouters(network); if (routers == null || routers.isEmpty()) { - logger.debug("Virtual router elemnt doesn't need to apply lb rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router element doesn't need to apply lb rules on the backend; virtual router doesn't exist in the network {}", network); return true; } @@ -352,7 +352,7 @@ public String[] applyVpnUsers(final RemoteAccessVpn vpn, final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug("Virtual router elemnt doesn't need to apply vpn users on the backend; virtual router" + " doesn't exist in the network " + network.getId()); + logger.debug("Virtual router element doesn't need to apply vpn users on the backend; virtual router doesn't exist in the network {}", network); return null; } @@ -376,7 +376,7 @@ public boolean startVpn(final RemoteAccessVpn vpn) throws ResourceUnavailableExc if (canHandle(network, Service.Vpn)) { final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug("Virtual router elemnt doesn't need stop vpn on the backend; virtual router doesn't" + " exist in the network " + network.getId()); + logger.debug("Virtual router element doesn't need stop vpn on the backend; virtual router doesn't exist in the network {}", network); return true; } return _routerMgr.startRemoteAccessVpn(network, vpn, routers); @@ -396,8 +396,7 @@ public boolean stopVpn(final RemoteAccessVpn vpn) throws ResourceUnavailableExce if (canHandle(network, Service.Vpn)) { final List routers = 
_routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug(String.format("There is no virtual router in network [uuid: %s, name: %s], it is not necessary to stop the VPN on backend.", - network.getUuid(), network.getName())); + logger.debug("There is no virtual router in network {}, it is not necessary to stop the VPN on backend.", network); return true; } return _routerMgr.deleteRemoteAccessVpn(network, vpn, routers); @@ -420,7 +419,7 @@ public boolean applyIps(final Network network, final List routers = getRouters(network); if (routers == null || routers.isEmpty()) { - logger.debug("Virtual router elemnt doesn't need to associate ip addresses on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router element doesn't need to associate ip addresses on the backend; virtual router doesn't exist in the network {}", network); return true; } @@ -592,7 +591,7 @@ public boolean applyStaticNats(final Network network, final List routers = getRouters(network); if (routers == null || routers.isEmpty()) { - logger.debug("Virtual router elemnt doesn't need to apply static nat on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router element doesn't need to apply static nat on the backend; virtual router doesn't exist in the network {}", network); return true; } @@ -694,7 +693,7 @@ public boolean savePassword(final Network network, final NicProfile nic, final V } final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug("Can't find virtual router element in network " + network.getId()); + logger.debug("Can't find virtual router element in network {}", network); return true; } @@ -752,7 +751,7 @@ public boolean saveSSHKey(final Network network, final NicProfile nic, final Vir } final List routers = 
_routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug("Can't find virtual router element in network " + network.getId()); + logger.debug("Can't find virtual router element in network {}", network); return true; } @@ -802,7 +801,7 @@ public boolean saveUserData(final Network network, final NicProfile nic, final V } final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug("Can't find virtual router element in network " + network.getId()); + logger.debug("Can't find virtual router element in network {}", network); return true; } @@ -876,7 +875,7 @@ public boolean applyPFRules(final Network network, final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug("Virtual router elemnt doesn't need to apply firewall rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router element doesn't need to apply firewall rules on the backend; virtual router doesn't exist in the network {}", network); return true; } @@ -1245,7 +1244,7 @@ private void updateUserVmData(final NicProfile nic, final Network network, final logger.debug("Successfully saved user data to router"); } } else { - logger.debug("Not applying userdata for nic id=" + nic.getId() + " in vm id=" + vm.getId() + " because it is not supported in network id=" + network.getId()); + logger.debug("Not applying userdata for nic {} in vm {} because it is not supported in network {}", nic, vm, network); } } diff --git a/server/src/main/java/com/cloud/network/element/VpcVirtualRouterElement.java b/server/src/main/java/com/cloud/network/element/VpcVirtualRouterElement.java index 841f62211822..3d613fca18ea 100644 --- a/server/src/main/java/com/cloud/network/element/VpcVirtualRouterElement.java +++ 
b/server/src/main/java/com/cloud/network/element/VpcVirtualRouterElement.java @@ -60,6 +60,7 @@ import com.cloud.network.vpc.Vpc; import com.cloud.network.vpc.VpcGateway; import com.cloud.network.vpc.VpcManager; +import com.cloud.network.vpc.dao.NetworkACLDao; import com.cloud.network.vpc.dao.VpcDao; import com.cloud.network.vpc.dao.VpcGatewayDao; import com.cloud.offering.NetworkOffering; @@ -99,6 +100,8 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc @Inject VpcGatewayDao _vpcGatewayDao; @Inject + NetworkACLDao _networkACLDao; + @Inject NetworkACLItemDao _networkACLItemDao; @Inject EntityManager _entityMgr; @@ -435,7 +438,9 @@ public boolean createPrivateGateway(final PrivateGateway gateway) throws Concurr final List routers = _vpcRouterMgr.getVpcRouters(gateway.getVpcId()); if (routers == null || routers.isEmpty()) { - logger.debug(getName() + " element doesn't need to create Private gateway on the backend; VPC virtual " + "router doesn't exist in the vpc id=" + gateway.getVpcId()); + logger.debug("{} element doesn't need to create Private gateway on the backend; VPC" + + " virtual router doesn't exist in the vpc id {} ({})", + this::getName, gateway::getVpcId, () -> _vpcDao.findById(gateway.getVpcId())); return true; } @@ -454,7 +459,7 @@ public boolean createPrivateGateway(final PrivateGateway gateway) throws Concurr final List rules = _networkACLItemDao.listByACL(gateway.getNetworkACLId()); result = result && networkTopology.applyNetworkACLs(network, rules, domainRouterVO, isPrivateGateway); } catch (final Exception ex) { - logger.debug("Failed to apply network acl id " + gateway.getNetworkACLId() + " on gateway "); + logger.debug("Failed to apply network acl {} on gateway ", () -> _networkACLDao.findById(gateway.getNetworkACLId())); return false; } } @@ -472,7 +477,9 @@ public boolean deletePrivateGateway(final PrivateGateway gateway) throws Concurr final List routers = _vpcRouterMgr.getVpcRouters(gateway.getVpcId()); if 
(routers == null || routers.isEmpty()) { - logger.debug(getName() + " element doesn't need to delete Private gateway on the backend; VPC virtual " + "router doesn't exist in the vpc id=" + gateway.getVpcId()); + logger.debug("{} element doesn't need to delete Private gateway on the backend; VPC " + + "virtual router doesn't exist in the vpc id {} ({})", + this::getName, gateway::getVpcId, () -> _vpcDao.findById(gateway.getVpcId())); return true; } @@ -501,8 +508,8 @@ public boolean applyIps(final Network network, final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug(getName() + " element doesn't need to associate ip addresses on the backend; VPC virtual " + "router doesn't exist in the network " - + network.getId()); + logger.debug("{} element doesn't need to associate ip addresses on the backend; " + + "VPC virtual router doesn't exist in the network {}", getName(), network); return false; } @@ -522,7 +529,8 @@ public boolean applyNetworkACLs(final Network network, final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug("Virtual router elemnt doesn't need to apply firewall rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router element doesn't need to apply firewall rules on the " + + "backend; virtual router doesn't exist in the network {}", network); return true; } @@ -533,7 +541,7 @@ public boolean applyNetworkACLs(final Network network, final List routers = _vpcRouterMgr.getVpcRouters(gateway.getVpcId()); if (routers == null || routers.isEmpty()) { - logger.debug("Virtual router element doesn't need to apply network acl rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router element doesn't need to apply network acl rules on the " + + 
"backend; virtual router doesn't exist in the network {}", network); return true; } @@ -612,12 +621,12 @@ public boolean startSite2SiteVpn(final Site2SiteVpnConnection conn) throws Resou final Vpc vpc = _entityMgr.findById(Vpc.class, vpcId); if (!_ntwkModel.isProviderEnabledInZone(vpc.getZoneId(), Provider.VPCVirtualRouter.getName())) { - throw new ResourceUnavailableException("VPC provider is not enabled in zone " + vpc.getZoneId(), DataCenter.class, vpc.getZoneId()); + throw new ResourceUnavailableException(String.format("VPC provider is not enabled in zone %s", _dcDao.findById(vpc.getZoneId())), DataCenter.class, vpc.getZoneId()); } final List routers = _vpcRouterMgr.getVpcRouters(ip.getVpcId()); if (routers == null) { - throw new ResourceUnavailableException("Cannot enable site-to-site VPN on the backend; virtual router doesn't exist in the vpc " + ip.getVpcId(), DataCenter.class, + throw new ResourceUnavailableException(String.format("Cannot enable site-to-site VPN on the backend; virtual router doesn't exist in the vpc %s", vpc), DataCenter.class, vpc.getZoneId()); } @@ -643,12 +652,12 @@ public boolean stopSite2SiteVpn(final Site2SiteVpnConnection conn) throws Resour final Vpc vpc = _entityMgr.findById(Vpc.class, vpcId); if (!_ntwkModel.isProviderEnabledInZone(vpc.getZoneId(), Provider.VPCVirtualRouter.getName())) { - throw new ResourceUnavailableException("VPC provider is not enabled in zone " + vpc.getZoneId(), DataCenter.class, vpc.getZoneId()); + throw new ResourceUnavailableException(String.format("VPC provider is not enabled in zone %s", _dcDao.findById(vpc.getZoneId())), DataCenter.class, vpc.getZoneId()); } final List routers = _vpcRouterMgr.getVpcRouters(ip.getVpcId()); if (routers == null) { - throw new ResourceUnavailableException("Cannot enable site-to-site VPN on the backend; virtual router doesn't exist in the vpc " + ip.getVpcId(), DataCenter.class, + throw new ResourceUnavailableException(String.format("Cannot enable site-to-site VPN on the 
backend; virtual router doesn't exist in the vpc %s", vpc), DataCenter.class, vpc.getZoneId()); } @@ -669,7 +678,7 @@ public String[] applyVpnUsers(final RemoteAccessVpn vpn, final List routers = _vpcRouterMgr.getVpcRouters(vpcId); if (routers == null) { - logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network " + vpcId); + logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network {}", () -> _vpcDao.findById(vpcId)); return null; } @@ -698,7 +707,7 @@ public boolean startVpn(final RemoteAccessVpn vpn) throws ResourceUnavailableExc final List routers = _vpcRouterMgr.getVpcRouters(vpn.getVpcId()); if (routers == null) { - logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network " + vpn.getVpcId()); + logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network {}", () -> _vpcDao.findById(vpn.getVpcId())); return false; } @@ -717,7 +726,7 @@ public boolean stopVpn(final RemoteAccessVpn vpn) throws ResourceUnavailableExce final List routers = _vpcRouterMgr.getVpcRouters(vpn.getVpcId()); if (routers == null) { - logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network " + vpn.getVpcId()); + logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network {}", () -> _vpcDao.findById(vpn.getVpcId())); return false; } diff --git a/server/src/main/java/com/cloud/network/firewall/FirewallManagerImpl.java b/server/src/main/java/com/cloud/network/firewall/FirewallManagerImpl.java index 6b98fc00c594..6b8133534d21 100644 --- a/server/src/main/java/com/cloud/network/firewall/FirewallManagerImpl.java +++ b/server/src/main/java/com/cloud/network/firewall/FirewallManagerImpl.java @@ -451,7 +451,7 @@ public void detectRulesConflict(FirewallRule newRule) throws NetworkRuleConflict if (newRule.getProtocol().equalsIgnoreCase(NetUtils.ICMP_PROTO) && 
newRule.getProtocol().equalsIgnoreCase(rule.getProtocol())) { if (newRule.getIcmpCode().longValue() == rule.getIcmpCode().longValue() && newRule.getIcmpType().longValue() == rule.getIcmpType().longValue() && newRule.getProtocol().equalsIgnoreCase(rule.getProtocol()) && duplicatedCidrs) { - throw new InvalidParameterValueException("New rule conflicts with existing rule id=" + rule.getId()); + throw new InvalidParameterValueException(String.format("New rule conflicts with existing rule: %s", rule)); } } @@ -483,8 +483,7 @@ public void detectRulesConflict(FirewallRule newRule) throws NetworkRuleConflict (rule.getPurpose() == Purpose.LoadBalancing && newRule.getPurpose() == Purpose.Vpn && !newRule.getProtocol().equalsIgnoreCase(rule.getProtocol())); if (!(allowPf || allowStaticNat || oneOfRulesIsFirewall || allowVpnPf || allowVpnLb)) { - throw new NetworkRuleConflictException("The range specified, " + newRule.getSourcePortStart() + "-" + newRule.getSourcePortEnd() + - ", conflicts with rule " + rule.getId() + " which has " + rule.getSourcePortStart() + "-" + rule.getSourcePortEnd()); + throw new NetworkRuleConflictException(String.format("The range specified, %d-%d, conflicts with rule %s which has %d-%d", newRule.getSourcePortStart(), newRule.getSourcePortEnd(), rule, rule.getSourcePortStart(), rule.getSourcePortEnd())); } } } @@ -609,7 +608,7 @@ public void validateFirewallRule(Account caller, IPAddressVO ipAddress, Integer } else if (proto.equalsIgnoreCase(NetUtils.ICMP_PROTO) && purpose != Purpose.Firewall) { throw new InvalidParameterValueException("Protocol " + proto + " is currently supported only for rules with purpose " + Purpose.Firewall); } else if (purpose == Purpose.Firewall && !supportedTrafficTypes.contains(trafficType.toString().toLowerCase())) { - throw new InvalidParameterValueException("Traffic Type " + trafficType + " is currently supported by Firewall in network " + networkId); + throw new InvalidParameterValueException(String.format("Traffic Type 
%s is currently supported by Firewall in network %s", trafficType, network)); } } @@ -639,15 +638,13 @@ public boolean applyRules(List rules, boolean continueOn if (rule.getState() == FirewallRule.State.Revoke) { FirewallRuleVO relatedRule = _firewallDao.findByRelatedId(rule.getId()); if (relatedRule != null) { - logger.warn("Can't remove the firewall rule id=" + rule.getId() + " as it has related firewall rule id=" + relatedRule.getId() + - "; leaving it in Revoke state"); + logger.warn(String.format("Can't remove the firewall rule [%s] as it has related firewall rule [%s]; leaving it in Revoke state", rule, relatedRule)); success = false; } else { removeRule(rule); if (rule.getSourceIpAddressId() != null) { //if the rule is the last one for the ip address assigned to VPC, unassign it from the network - IpAddress ip = _ipAddressDao.findById(rule.getSourceIpAddressId()); - _vpcMgr.unassignIPFromVpcNetwork(ip.getId(), rule.getNetworkId()); + _vpcMgr.unassignIPFromVpcNetwork(rule.getSourceIpAddressId(), rule.getNetworkId()); } } } else if (rule.getState() == FirewallRule.State.Add) { @@ -974,12 +971,12 @@ public FirewallRule getFirewallRule(long ruleId) { @Override @ActionEvent(eventType = EventTypes.EVENT_FIREWALL_CLOSE, eventDescription = "revoking firewall rule", async = true) - public boolean revokeFirewallRulesForIp(long ipId, long userId, Account caller) throws ResourceUnavailableException { + public boolean revokeFirewallRulesForIp(IpAddress ip, long userId, Account caller) throws ResourceUnavailableException { List rules = new ArrayList(); - List fwRules = _firewallDao.listByIpAndPurposeAndNotRevoked(ipId, Purpose.Firewall); + List fwRules = _firewallDao.listByIpAndPurposeAndNotRevoked(ip.getId(), Purpose.Firewall); if (logger.isDebugEnabled()) { - logger.debug("Releasing " + fwRules.size() + " firewall rules for ip id=" + ipId); + logger.debug("Releasing {} firewall rules for ip {}", fwRules.size(), ip); } for (FirewallRuleVO rule : fwRules) { @@ -989,7 
+986,7 @@ public boolean revokeFirewallRulesForIp(long ipId, long userId, Account caller) } // now send everything to the backend - List rulesToApply = _firewallDao.listByIpAndPurpose(ipId, Purpose.Firewall); + List rulesToApply = _firewallDao.listByIpAndPurpose(ip.getId(), Purpose.Firewall); //apply rules if (!applyFirewallRules(rulesToApply, rulesContinueOnErrFlag, caller)) { if (!rulesContinueOnErrFlag) { @@ -997,10 +994,10 @@ public boolean revokeFirewallRulesForIp(long ipId, long userId, Account caller) } } // Now we check again in case more rules have been inserted. - rules.addAll(_firewallDao.listByIpAndPurposeAndNotRevoked(ipId, Purpose.Firewall)); + rules.addAll(_firewallDao.listByIpAndPurposeAndNotRevoked(ip.getId(), Purpose.Firewall)); if (logger.isDebugEnabled()) { - logger.debug("Successfully released firewall rules for ip id=" + ipId + " and # of rules now = " + rules.size()); + logger.debug("Successfully released firewall rules for ip {} and # of rules now = {}", ip, rules.size()); } return rules.size() == 0; @@ -1025,12 +1022,12 @@ public FirewallRule createRuleForAllCidrs(long ipAddrId, Account caller, Integer @Override @ActionEvent(eventType = EventTypes.EVENT_FIREWALL_CLOSE, eventDescription = "revoking firewall rule", async = true) - public boolean revokeAllFirewallRulesForNetwork(long networkId, long userId, Account caller) throws ResourceUnavailableException { + public boolean revokeAllFirewallRulesForNetwork(Network network, long userId, Account caller) throws ResourceUnavailableException { List rules = new ArrayList(); - List fwRules = _firewallDao.listByNetworkAndPurposeAndNotRevoked(networkId, Purpose.Firewall); + List fwRules = _firewallDao.listByNetworkAndPurposeAndNotRevoked(network.getId(), Purpose.Firewall); if (logger.isDebugEnabled()) { - logger.debug("Releasing " + fwRules.size() + " firewall rules for network id=" + networkId); + logger.debug("Releasing {} firewall rules for network {}", fwRules.size(), network); } for 
(FirewallRuleVO rule : fwRules) { @@ -1040,14 +1037,14 @@ public boolean revokeAllFirewallRulesForNetwork(long networkId, long userId, Acc } // now send everything to the backend - List rulesToApply = _firewallDao.listByNetworkAndPurpose(networkId, Purpose.Firewall); + List rulesToApply = _firewallDao.listByNetworkAndPurpose(network.getId(), Purpose.Firewall); boolean success = applyFirewallRules(rulesToApply, true, caller); // Now we check again in case more rules have been inserted. - rules.addAll(_firewallDao.listByNetworkAndPurposeAndNotRevoked(networkId, Purpose.Firewall)); + rules.addAll(_firewallDao.listByNetworkAndPurposeAndNotRevoked(network.getId(), Purpose.Firewall)); if (logger.isDebugEnabled()) { - logger.debug("Successfully released firewall rules for network id=" + networkId + " and # of rules now = " + rules.size()); + logger.debug("Successfully released firewall rules for network {} and # of rules now = {}", network, rules.size()); } return success && rules.size() == 0; @@ -1062,7 +1059,7 @@ public boolean revokeRelatedFirewallRule(long ruleId, boolean apply) { return true; } - logger.debug("Revoking Firewall rule id=" + fwRule.getId() + " as a part of rule delete id=" + ruleId + " with apply=" + apply); + logger.debug("Revoking Firewall rule [{}] as a part of rule delete id={} with apply={}", fwRule, ruleId, apply); return revokeIngressFirewallRule(fwRule.getId(), apply); } @@ -1098,10 +1095,10 @@ public boolean revokeFirewallRulesForVm(long vmId) { Set ipsToReprogram = new HashSet(); if (firewallRules.isEmpty()) { - logger.debug("No firewall rules are found for vm id=" + vmId); + logger.debug("No firewall rules are found for vm: {}", vm); return true; } else { - logger.debug("Found " + firewallRules.size() + " to cleanup for vm id=" + vmId); + logger.debug("Found {} to cleanup for vm: {}", firewallRules.size(), vm); } for (FirewallRuleVO rule : firewallRules) { @@ -1112,11 +1109,12 @@ public boolean revokeFirewallRulesForVm(long vmId) { // apply 
rules for all ip addresses for (Long ipId : ipsToReprogram) { - logger.debug("Applying firewall rules for ip address id=" + ipId + " as a part of vm expunge"); + IPAddressVO ip = _ipAddressDao.findById(ipId); + logger.debug("Applying firewall rules for ip address {} with id={} as a part of vm expunge", ip, ipId); try { success = success && applyIngressFirewallRules(ipId, _accountMgr.getSystemAccount()); } catch (ResourceUnavailableException ex) { - logger.warn("Failed to apply firewall rules for ip id=" + ipId); + logger.warn("Failed to apply firewall rules for ip {}", ip); success = false; } } diff --git a/server/src/main/java/com/cloud/network/guru/DirectNetworkGuru.java b/server/src/main/java/com/cloud/network/guru/DirectNetworkGuru.java index a8c98fc1deee..28948174fb0f 100644 --- a/server/src/main/java/com/cloud/network/guru/DirectNetworkGuru.java +++ b/server/src/main/java/com/cloud/network/guru/DirectNetworkGuru.java @@ -359,7 +359,7 @@ public Network implement(Network network, NetworkOffering offering, DeployDestin @DB public void deallocate(final Network network, final NicProfile nic, VirtualMachineProfile vm) { if (logger.isDebugEnabled()) { - logger.debug("Deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIPv4Address()); + logger.debug("Deallocate network: network: {}, nic: {}", network, nic); } if (nic.getIPv4Address() != null) { @@ -371,14 +371,14 @@ public void doInTransactionWithoutResult(TransactionStatus status) { // if the ip address a part of placeholder, don't release it Nic placeholderNic = _networkModel.getPlaceholderNicForRouter(network, null); if (placeholderNic != null && placeholderNic.getIPv4Address().equalsIgnoreCase(ip.getAddress().addr())) { - logger.debug("Not releasing direct ip " + ip.getId() + " yet as its ip is saved in the placeholder"); + logger.debug("Not releasing direct ip {} yet as its ip is saved in the placeholder", ip); } else { _ipAddrMgr.markIpAsUnavailable(ip.getId()); 
_ipAddressDao.unassignIpAddress(ip.getId()); } //unassign nic secondary ip address - logger.debug("remove nic " + nic.getId() + " secondary ip "); + logger.debug("remove nic {} secondary ip ", nic); List nicSecIps = null; nicSecIps = _nicSecondaryIpDao.getSecondaryIpAddressesForNic(nic.getId()); for (String secIp : nicSecIps) { diff --git a/server/src/main/java/com/cloud/network/guru/DirectPodBasedNetworkGuru.java b/server/src/main/java/com/cloud/network/guru/DirectPodBasedNetworkGuru.java index 2800e3284c1b..2eada6ae4272 100644 --- a/server/src/main/java/com/cloud/network/guru/DirectPodBasedNetworkGuru.java +++ b/server/src/main/java/com/cloud/network/guru/DirectPodBasedNetworkGuru.java @@ -226,16 +226,16 @@ public void doInTransactionWithoutResult(TransactionStatus status) throws Insuff */ if (vlan.getIp6Cidr() != null) { if (nic.getIPv6Address() == null) { - logger.debug("Found IPv6 CIDR " + vlan.getIp6Cidr() + " for VLAN " + vlan.getId()); + logger.debug("Found IPv6 CIDR {} for VLAN {}", vlan.getIp6Cidr(), vlan); nic.setIPv6Cidr(vlan.getIp6Cidr()); nic.setIPv6Gateway(vlan.getIp6Gateway()); IPv6Address ipv6addr = NetUtils.EUI64Address(vlan.getIp6Cidr(), nic.getMacAddress()); - logger.info("Calculated IPv6 address " + ipv6addr + " using EUI-64 for NIC " + nic.getUuid()); + logger.info("Calculated IPv6 address {} using EUI-64 for NIC {}", ipv6addr, nic); nic.setIPv6Address(ipv6addr.toString()); } } else { - logger.debug("No IPv6 CIDR configured for VLAN " + vlan.getId()); + logger.debug("No IPv6 CIDR configured for VLAN {}", vlan); } } }); diff --git a/server/src/main/java/com/cloud/network/guru/GuestNetworkGuru.java b/server/src/main/java/com/cloud/network/guru/GuestNetworkGuru.java index c46be9bf4289..96c3da66c090 100644 --- a/server/src/main/java/com/cloud/network/guru/GuestNetworkGuru.java +++ b/server/src/main/java/com/cloud/network/guru/GuestNetworkGuru.java @@ -201,7 +201,7 @@ public boolean isMyIsolationMethod(final PhysicalNetwork physicalNetwork) { } if 
(methods.isEmpty()) { // The empty isolation method is assumed to be VLAN - logger.debug("Empty physical isolation type for physical network " + physicalNetwork.getUuid()); + logger.debug("Empty physical isolation type for physical network {}", physicalNetwork); methods = new ArrayList(1); methods.add("VLAN".toLowerCase()); } @@ -297,7 +297,7 @@ public Network design(final NetworkOffering offering, final DeploymentPlan plan, public void deallocate(final Network network, final NicProfile nic, final VirtualMachineProfile vm) { if (network.getSpecifyIpRanges()) { if (logger.isDebugEnabled()) { - logger.debug("Deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIPv4Address()); + logger.debug("Deallocate network: {}, nic: {}", network, nic); } final IPAddressVO ip = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), nic.getIPv4Address()); @@ -321,7 +321,7 @@ public int getVlanOffset(final long physicalNetworkId, final int vlanTag) { } if (pNetwork.getVnet() == null) { - throw new CloudRuntimeException("Could not find vlan range for physical Network " + physicalNetworkId + "."); + throw new CloudRuntimeException(String.format("Could not find vlan range for physical Network %s.", pNetwork)); } Integer lowestVlanTag = null; final List> vnetList = pNetwork.getVnet(); @@ -437,7 +437,8 @@ public NicProfile allocate(final Network network, NicProfile nic, final VirtualM if (network.getGuestType() != GuestType.L2 && vm.getType() == VirtualMachine.Type.DomainRouter) { Nic placeholderNic = _networkModel.getPlaceholderNicForRouter(network, null); if (placeholderNic != null) { - logger.debug("Nic got an ip address " + placeholderNic.getIPv4Address() + " stored in placeholder nic for the network " + network); + logger.debug("Nic {} got an ip address {} stored in placeholder nic " + + "for the network {}", nic, placeholderNic.getIPv4Address(), network); guestIp = placeholderNic.getIPv4Address(); } } @@ -515,11 +516,11 @@ public void shutdown(final 
NetworkProfile profile, final NetworkOffering offerin } if ((profile.getBroadcastDomainType() == BroadcastDomainType.Vlan || profile.getBroadcastDomainType() == BroadcastDomainType.Vxlan) && !offering.isSpecifyVlan()) { - logger.debug("Releasing vnet for the network id=" + profile.getId()); + logger.debug("Releasing vnet for the network: {}", profile); _dcDao.releaseVnet(BroadcastDomainType.getValue(profile.getBroadcastUri()), profile.getDataCenterId(), profile.getPhysicalNetworkId(), profile.getAccountId(), profile.getReservationId()); ActionEventUtils.onCompletedActionEvent(CallContext.current().getCallingUserId(), profile.getAccountId(), EventVO.LEVEL_INFO, EventTypes.EVENT_ZONE_VLAN_RELEASE, - "Released Zone Vnet: " + BroadcastDomainType.getValue(profile.getBroadcastUri()) + " for Network: " + profile.getId(), + String.format("Released Zone Vnet: %s for Network: %s", BroadcastDomainType.getValue(profile.getBroadcastUri()), profile), profile.getDataCenterId(), ApiCommandResourceType.Zone.toString(), 0); } diff --git a/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java b/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java index 320f064a31e1..5d4ce1052cfc 100644 --- a/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java +++ b/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java @@ -376,14 +376,14 @@ public LoadBalancerTO.AutoScaleVmGroupTO toAutoScaleVmGroupTO(LbAutoScaleVmGroup List conditionTOs = new ArrayList<>(lbConditions.size()); for (LbCondition lbCondition : lbConditions) { Counter counter = lbCondition.getCounter(); - LoadBalancerTO.CounterTO counterTO = new LoadBalancerTO.CounterTO(counter.getId(), counter.getName(), counter.getSource(), "" + counter.getValue(), counter.getProvider()); + LoadBalancerTO.CounterTO counterTO = new LoadBalancerTO.CounterTO(counter.getId(), counter.getUuid(), counter.getName(), counter.getSource(), "" + counter.getValue(), 
counter.getProvider()); Condition condition = lbCondition.getCondition(); - LoadBalancerTO.ConditionTO conditionTO = new LoadBalancerTO.ConditionTO(condition.getId(), condition.getThreshold(), condition.getRelationalOperator(), counterTO); + LoadBalancerTO.ConditionTO conditionTO = new LoadBalancerTO.ConditionTO(condition.getId(), condition.getUuid(), condition.getThreshold(), condition.getRelationalOperator(), counterTO); conditionTOs.add(conditionTO); } AutoScalePolicy autoScalePolicy = lbAutoScalePolicy.getPolicy(); - autoScalePolicyTOs.add(new LoadBalancerTO.AutoScalePolicyTO(autoScalePolicy.getId(), autoScalePolicy.getDuration(), autoScalePolicy.getQuietTime(), autoScalePolicy.getLastQuietTime(), - autoScalePolicy.getAction(), conditionTOs, lbAutoScalePolicy.isRevoked())); + autoScalePolicyTOs.add(new LoadBalancerTO.AutoScalePolicyTO(autoScalePolicy.getId(), autoScalePolicy.getUuid(), autoScalePolicy.getDuration(), autoScalePolicy.getQuietTime(), + autoScalePolicy.getLastQuietTime(), autoScalePolicy.getAction(), conditionTOs, lbAutoScalePolicy.isRevoked())); } LbAutoScaleVmProfile lbAutoScaleVmProfile = lbAutoScaleVmGroup.getProfile(); AutoScaleVmProfile autoScaleVmProfile = lbAutoScaleVmProfile.getProfile(); @@ -415,7 +415,7 @@ public Network.Provider getLoadBalancerServiceProvider(LoadBalancerVO loadBalanc Network network = _networkDao.findById(loadBalancer.getNetworkId()); List providers = _networkMgr.getProvidersForServiceInNetwork(network, Network.Service.Lb); if (CollectionUtils.isEmpty(providers)) { - throw new CloudRuntimeException(String.format("Unable to find LB provider for network with id: %s ", network.getId())); + throw new CloudRuntimeException(String.format("Unable to find LB provider for network: %s ", network)); } return providers.get(0); } @@ -477,16 +477,16 @@ public boolean configureLbAutoScaleVmGroup(final long vmGroupid, AutoScaleVmGrou try { success = applyAutoScaleConfig(loadBalancer, vmGroup, currentState); } catch 
(ResourceUnavailableException e) { - logger.warn("Unable to configure AutoScaleVmGroup to the lb rule: " + loadBalancer.getId() + " because resource is unavailable:", e); + logger.warn("Unable to configure AutoScaleVmGroup to the lb rule: {} because resource is unavailable:", loadBalancer, e); if (isRollBackAllowedForProvider(loadBalancer)) { loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " lb state rolback while creating AutoscaleVmGroup"); + logger.debug("LB Rollback rule: {} lb state rollback while creating AutoscaleVmGroup", loadBalancer); } throw e; } finally { if (!success) { - logger.warn("Failed to configure LB Auto Scale Vm Group with Id:" + vmGroupid); + logger.warn(String.format("Failed to configure LB Auto Scale Vm Group: %s", vmGroup)); } } @@ -496,15 +496,15 @@ public boolean configureLbAutoScaleVmGroup(final long vmGroupid, AutoScaleVmGrou @Override public void doInTransactionWithoutResult(TransactionStatus status) { loadBalancer.setState(FirewallRule.State.Active); - logger.debug("LB rule " + loadBalancer.getId() + " state is set to Active"); + logger.debug("LB rule {} state is set to Active", loadBalancer); _lbDao.persist(loadBalancer); vmGroup.setState(AutoScaleVmGroup.State.ENABLED); _autoScaleVmGroupDao.persist(vmGroup); - logger.debug("LB Auto Scale Vm Group with Id: " + vmGroupid + " is set to Enabled state."); + logger.debug("LB Auto Scale Vm Group: {} is set to Enabled state.", vmGroup); } }); } - logger.info("Successfully configured LB Autoscale Vm Group with Id: " + vmGroupid); + logger.info("Successfully configured LB Autoscale Vm Group: {}", vmGroup); } return success; } @@ -603,12 +603,12 @@ public StickinessPolicy createLBStickinessPolicy(CreateLBStickinessPolicyCmd cmd _accountMgr.checkAccess(caller.getCallingAccount(), null, true, loadBalancer); if (loadBalancer.getState() == FirewallRule.State.Revoke) { - throw new 
InvalidParameterValueException("Failed: LB rule id: " + cmd.getLbRuleId() + " is in deleting state: "); + throw new InvalidParameterValueException(String.format("Failed: LB rule: %s is in deleting state: ", loadBalancer)); } /* Generic validations */ if (!genericValidator(cmd)) { - throw new InvalidParameterValueException("Failed to create Stickiness policy: Validation Failed " + cmd.getLbRuleId()); + throw new InvalidParameterValueException(String.format("Failed to create Stickiness policy: Validation Failed %s", loadBalancer)); } /* @@ -623,7 +623,7 @@ public StickinessPolicy createLBStickinessPolicy(CreateLBStickinessPolicyCmd cmd LoadBalancingRule lbRule = new LoadBalancingRule(loadBalancer, getExistingDestinations(lbpolicy.getId()), policyList, null, sourceIp, null, loadBalancer.getLbProtocol()); if (!validateLbRule(lbRule)) { - throw new InvalidParameterValueException("Failed to create Stickiness policy: Validation Failed " + cmd.getLbRuleId()); + throw new InvalidParameterValueException(String.format("Failed to create Stickiness policy: Validation Failed %s", loadBalancer)); } /* Finally Insert into DB */ @@ -663,7 +663,7 @@ public HealthCheckPolicy createLBHealthCheckPolicy(CreateLBHealthCheckPolicyCmd _accountMgr.checkAccess(caller.getCallingAccount(), null, true, loadBalancer); if (loadBalancer.getState() == FirewallRule.State.Revoke) { - throw new InvalidParameterValueException("Failed: LB rule id: " + cmd.getLbRuleId() + " is in deleting state: "); + throw new InvalidParameterValueException(String.format("Failed: LB rule: %s is in deleting state: ", loadBalancer)); } /* @@ -672,13 +672,13 @@ public HealthCheckPolicy createLBHealthCheckPolicy(CreateLBHealthCheckPolicyCmd */ if (!validateHealthCheck(cmd)) { throw new InvalidParameterValueException( - "Failed to create HealthCheck policy: Validation Failed (HealthCheck Policy is not supported by LB Provider for the LB rule id :" + cmd.getLbRuleId() + ")"); + String.format("Failed to create HealthCheck 
policy: Validation Failed (HealthCheck Policy is not supported by LB Provider for the LB rule:%s)", loadBalancer)); } /* Validation : check for the multiple hc policies to the rule id */ List hcPolicies = _lb2healthcheckDao.listByLoadBalancerId(cmd.getLbRuleId(), false); if (hcPolicies.size() > 0) { - throw new InvalidParameterValueException("Failed to create HealthCheck policy: Already policy attached for the LB Rule id :" + cmd.getLbRuleId()); + throw new InvalidParameterValueException(String.format("Failed to create HealthCheck policy: Already policy attached for the LB Rule:%s", loadBalancer)); } /* * Specific validations using network element validator for specific @@ -752,12 +752,12 @@ public boolean applyLBStickinessPolicy(CreateLBStickinessPolicyCmd cmd) { try { applyLoadBalancerConfig(cmd.getLbRuleId()); } catch (ResourceUnavailableException e) { - logger.warn("Unable to apply Stickiness policy to the lb rule: " + cmd.getLbRuleId() + " because resource is unavailable:", e); + logger.warn("Unable to apply Stickiness policy to the lb rule: {} because resource is unavailable:", loadBalancer, e); if (isRollBackAllowedForProvider(loadBalancer)) { loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); deleteLBStickinessPolicy(cmd.getEntityId(), false); - logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " lb state rolback while creating sticky policy"); + logger.debug("LB Rollback rule: {} lb state rollback while creating sticky policy", loadBalancer); } else { deleteLBStickinessPolicy(cmd.getEntityId(), false); if (oldStickinessPolicyId != 0) { @@ -798,11 +798,11 @@ public boolean applyLBHealthCheckPolicy(CreateLBHealthCheckPolicyCmd cmd) { try { applyLoadBalancerConfig(cmd.getLbRuleId()); } catch (ResourceUnavailableException e) { - logger.warn("Unable to apply healthcheck policy to the lb rule: " + cmd.getLbRuleId() + " because resource is unavailable:", e); + logger.warn("Unable to apply healthcheck policy to the lb rule: {} because 
resource is unavailable:", loadBalancer, e); if (isRollBackAllowedForProvider(loadBalancer)) { loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " lb state rolback while creating healthcheck policy"); + logger.debug("LB Rollback rule: {} lb state rollback while creating healthcheck policy", loadBalancer); } deleteLBHealthCheckPolicy(cmd.getEntityId(), false); success = false; @@ -823,7 +823,8 @@ public boolean deleteLBStickinessPolicy(long stickinessPolicyId, boolean apply) } LoadBalancerVO loadBalancer = _lbDao.findById(Long.valueOf(stickinessPolicy.getLoadBalancerId())); if (loadBalancer == null) { - throw new InvalidParameterValueException("Invalid Load balancer : " + stickinessPolicy.getLoadBalancerId() + " for Stickiness policy id: " + stickinessPolicyId); + throw new InvalidParameterValueException(String.format("Invalid Load balancer: %d for Stickiness policy: %s", + stickinessPolicy.getLoadBalancerId(), stickinessPolicy)); } long loadBalancerId = loadBalancer.getId(); FirewallRule.State backupState = loadBalancer.getState(); @@ -838,12 +839,13 @@ public boolean deleteLBStickinessPolicy(long stickinessPolicyId, boolean apply) boolean backupStickyState = stickinessPolicy.isRevoke(); stickinessPolicy.setRevoke(true); _lb2stickinesspoliciesDao.persist(stickinessPolicy); - logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", stickinesspolicyID " + stickinessPolicyId); + logger.debug("Set load balancer rule for revoke: rule {}, stickinesspolicy {}", loadBalancer, stickinessPolicy); try { if (!applyLoadBalancerConfig(loadBalancerId)) { - logger.warn("Failed to remove load balancer rule id " + loadBalancerId + " for stickinesspolicyID " + stickinessPolicyId); - throw new CloudRuntimeException("Failed to remove load balancer rule id " + loadBalancerId + " for stickinesspolicyID " + stickinessPolicyId); + String error = String.format("Failed to remove load 
balancer rule %s for stickinesspolicy %s", loadBalancer, stickinessPolicy); + logger.warn(error); + throw new CloudRuntimeException(error); } } catch (ResourceUnavailableException e) { if (isRollBackAllowedForProvider(loadBalancer)) { @@ -851,7 +853,7 @@ public boolean deleteLBStickinessPolicy(long stickinessPolicyId, boolean apply) _lb2stickinesspoliciesDao.persist(stickinessPolicy); loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while deleting sticky policy: " + stickinessPolicyId); + logger.debug("LB Rollback rule: {} while deleting sticky policy: {}", loadBalancer, stickinessPolicy); } logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); success = false; @@ -876,7 +878,7 @@ public boolean deleteLBHealthCheckPolicy(long healthCheckPolicyId, boolean apply } LoadBalancerVO loadBalancer = _lbDao.findById(Long.valueOf(healthCheckPolicy.getLoadBalancerId())); if (loadBalancer == null) { - throw new InvalidParameterValueException("Invalid Load balancer : " + healthCheckPolicy.getLoadBalancerId() + " for HealthCheck policy id: " + healthCheckPolicyId); + throw new InvalidParameterValueException(String.format("Invalid Load balancer: %d for HealthCheck policy: %s", healthCheckPolicy.getLoadBalancerId(), healthCheckPolicy)); } final long loadBalancerId = loadBalancer.getId(); FirewallRule.State backupState = loadBalancer.getState(); @@ -891,7 +893,7 @@ public boolean deleteLBHealthCheckPolicy(long healthCheckPolicyId, boolean apply boolean backupStickyState = healthCheckPolicy.isRevoke(); healthCheckPolicy.setRevoke(true); _lb2healthcheckDao.persist(healthCheckPolicy); - logger.debug("Set health check policy to revoke for loadbalancing rule id : " + loadBalancerId + ", healthCheckpolicyID " + healthCheckPolicyId); + logger.debug("Set health check policy to revoke for loadbalancing rule : {}, healthCheckpolicy {}", loadBalancer, healthCheckPolicy); 
// removing the state of services set by the monitor. final List maps = _lb2VmMapDao.listByLoadBalancerId(loadBalancerId); @@ -899,7 +901,7 @@ public boolean deleteLBHealthCheckPolicy(long healthCheckPolicyId, boolean apply Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { - logger.debug("Resetting health state policy for services in loadbalancing rule id : " + loadBalancerId); + logger.debug("Resetting health state policy for services in loadbalancing rule: {}", loadBalancer); for (LoadBalancerVMMapVO map : maps) { map.setState(null); _lb2VmMapDao.persist(map); @@ -910,8 +912,9 @@ public void doInTransactionWithoutResult(TransactionStatus status) { try { if (!applyLoadBalancerConfig(loadBalancerId)) { - logger.warn("Failed to remove load balancer rule id " + loadBalancerId + " for healthCheckpolicyID " + healthCheckPolicyId); - throw new CloudRuntimeException("Failed to remove load balancer rule id " + loadBalancerId + " for healthCheckpolicyID " + healthCheckPolicyId); + String error = String.format("Failed to remove load balancer rule %s for healthCheckpolicy %s", loadBalancer, healthCheckPolicy); + logger.warn(error); + throw new CloudRuntimeException(error); } } catch (ResourceUnavailableException e) { if (isRollBackAllowedForProvider(loadBalancer)) { @@ -919,7 +922,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { _lb2healthcheckDao.persist(healthCheckPolicy); loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while deleting healthcheck policy: " + healthCheckPolicyId); + logger.debug("LB Rollback rule: {} while deleting healthcheck policy: {}", loadBalancer, healthCheckPolicy); } logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); success = false; @@ -1023,7 +1026,7 @@ public boolean assignToLoadBalancer(long loadBalancerId, List 
instanceIds, } if (!isAutoScaleVM && _autoScaleVmGroupDao.isAutoScaleLoadBalancer(loadBalancerId)) { - throw new InvalidParameterValueException("Failed to assign to load balancer " + loadBalancerId + " because it is being used by an Autoscale VM group."); + throw new InvalidParameterValueException(String.format("Failed to assign to load balancer %s because it is being used by an Autoscale VM group.", loadBalancer)); } if (instanceIds == null && vmIdIpMap.isEmpty()) { @@ -1212,7 +1215,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { }); if (!vmInstanceIds.isEmpty()) { _lb2VmMapDao.remove(loadBalancer.getId(), vmInstanceIds, null); - logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while attaching VM: " + vmInstanceIds); + logger.debug("LB Rollback rule: {} while attaching VM: {}", loadBalancer, vmInstanceIds); } loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); @@ -1314,7 +1317,7 @@ public boolean assignCertToLoadBalancer(long lbRuleId, Long certId) { _lbDao.persist(loadBalancer); LoadBalancerCertMapVO certMap = _lbCertMapDao.findByLbRuleId(lbRuleId); _lbCertMapDao.remove(certMap.getId()); - logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while adding cert"); + logger.debug("LB Rollback rule: {} while adding cert", loadBalancer); } logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); } @@ -1350,8 +1353,8 @@ public boolean removeCertFromLoadBalancer(long lbRuleId) { _lbCertMapDao.persist(lbCertMap); if (!applyLoadBalancerConfig(lbRuleId)) { - logger.warn("Failed to remove cert from load balancer rule id " + lbRuleId); - CloudRuntimeException ex = new CloudRuntimeException("Failed to remove certificate load balancer rule id " + lbRuleId); + logger.warn("Failed to remove cert from load balancer rule {}", loadBalancer); + CloudRuntimeException ex = new CloudRuntimeException(String.format("Failed to remove certificate load balancer rule %s", loadBalancer)); 
ex.addProxyObject(loadBalancer.getUuid(), "loadBalancerId"); throw ex; } @@ -1362,11 +1365,11 @@ public boolean removeCertFromLoadBalancer(long lbRuleId) { _lbCertMapDao.persist(lbCertMap); loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - logger.debug("Rolled back certificate removal lb id " + lbRuleId); + logger.debug(String.format("Rolled back certificate removal lb %s", loadBalancer)); } logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); if (!success) { - CloudRuntimeException ex = new CloudRuntimeException("Failed to remove certificate from load balancer rule id " + lbRuleId); + CloudRuntimeException ex = new CloudRuntimeException(String.format("Failed to remove certificate from load balancer rule %s", loadBalancer)); ex.addProxyObject(loadBalancer.getUuid(), "loadBalancerId"); throw ex; } @@ -1435,25 +1438,23 @@ private boolean removeFromLoadBalancerInternal(long loadBalancerId, List i lbvm.setRevoke(true); _lb2VmMapDao.persist(lbvm); } - logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", vmId " + instanceId); + logger.debug("Set load balancer rule for revoke: rule {}, vmId {}", loadBalancer, instanceId); } else { for (String vmIp: lbVmIps) { LoadBalancerVMMapVO map = _lb2VmMapDao.findByLoadBalancerIdAndVmIdVmIp (loadBalancerId, instanceId, vmIp); if (map == null) { - throw new InvalidParameterValueException("The instance id: "+ instanceId +" is not configured " - + " for LB rule id " + loadBalancerId); + throw new InvalidParameterValueException(String.format("The instance id: %d is not configured for LB rule %s", instanceId, loadBalancer)); } map.setRevoke(true); _lb2VmMapDao.persist(map); - logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", vmId " + - instanceId + ", vmip " + vmIp); + logger.debug("Set load balancer rule for revoke: rule {}, vmId {}, vmip {}", loadBalancer, instanceId, vmIp); } } } if 
(!applyLoadBalancerConfig(loadBalancerId)) { - logger.warn("Failed to remove load balancer rule id " + loadBalancerId + " for vms " + instanceIds); + logger.warn("Failed to remove load balancer rule {} for vms {}", loadBalancer, instanceIds); CloudRuntimeException ex = new CloudRuntimeException("Failed to remove specified load balancer rule id for vms " + instanceIds); ex.addProxyObject(loadBalancer.getUuid(), "loadBalancerId"); throw ex; @@ -1478,21 +1479,20 @@ private boolean removeFromLoadBalancerInternal(long loadBalancerId, List i LoadBalancerVMMapVO map = _lb2VmMapDao.findByLoadBalancerIdAndVmId(loadBalancerId, instanceId); map.setRevoke(false); _lb2VmMapDao.persist(map); - logger.debug("LB Rollback rule id: " + loadBalancerId + ",while removing vmId " + instanceId); + logger.debug("LB Rollback rule: {},while removing vmId {}", loadBalancer, instanceId); }else { for (String vmIp: lbVmIps) { LoadBalancerVMMapVO map = _lb2VmMapDao.findByLoadBalancerIdAndVmIdVmIp (loadBalancerId, instanceId, vmIp); map.setRevoke(true); _lb2VmMapDao.persist(map); - logger.debug("LB Rollback rule id: " + loadBalancerId + ",while removing vmId " + - instanceId + ", vmip " + vmIp); + logger.debug("LB Rollback rule: {},while removing vmId {}, vmip {}", loadBalancer, instanceId, vmIp); } } } loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - logger.debug("LB Rollback rule id: " + loadBalancerId + " while removing vm instances"); + logger.debug("LB Rollback rule: {} while removing vm instances", loadBalancer); } logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); } @@ -1555,7 +1555,7 @@ public boolean deleteLoadBalancerRule(long loadBalancerId, boolean apply) { boolean result = deleteLoadBalancerRule(loadBalancerId, apply, caller, ctx.getCallingUserId(), true); if (!result) { - throw new CloudRuntimeException("Unable to remove load balancer rule " + loadBalancerId); + throw new CloudRuntimeException(String.format("Unable to 
remove load balancer rule %s", rule)); } return result; } @@ -1575,7 +1575,7 @@ public boolean deleteLoadBalancerRule(final long loadBalancerId, boolean apply, if (lbCertMap != null) { boolean removeResult = removeCertFromLoadBalancer(loadBalancerId); if (!removeResult) { - throw new CloudRuntimeException("Unable to remove certificate from load balancer rule " + loadBalancerId); + throw new CloudRuntimeException(String.format("Unable to remove certificate from load balancer rule %s", lb)); } } @@ -1600,7 +1600,7 @@ public List doInTransaction(TransactionStatus status) { for (LoadBalancerVMMapVO map : maps) { map.setRevoke(true); _lb2VmMapDao.persist(map); - logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", vmId " + map.getInstanceId()); + logger.debug("Set load balancer rule for revoke: rule {}, vmId {}", lb, map.getInstanceId()); } } @@ -1640,12 +1640,12 @@ public List doInTransaction(TransactionStatus status) { if (backupMaps != null) { for (LoadBalancerVMMapVO map : backupMaps) { _lb2VmMapDao.persist(map); - logger.debug("LB Rollback rule id: " + loadBalancerId + ", vmId " + map.getInstanceId()); + logger.debug("LB Rollback rule: {}, vmId {}", lb, map.getInstanceId()); } } lb.setState(backupState); _lbDao.persist(lb); - logger.debug("LB Rollback rule id: " + loadBalancerId + " while deleting LB rule."); + logger.debug("LB Rollback rule: {} while deleting LB rule.", lb); } else { logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); } @@ -1655,8 +1655,7 @@ public List doInTransaction(TransactionStatus status) { FirewallRuleVO relatedRule = _firewallDao.findByRelatedId(lb.getId()); if (relatedRule != null) { - logger.warn("Unable to remove firewall rule id=" + lb.getId() + " as it has related firewall rule id=" + relatedRule.getId() + - "; leaving it in Revoke state"); + logger.warn("Unable to remove firewall rule={} as it has related firewall rule={}; leaving it in Revoke state", lb, 
relatedRule); return false; } else { _firewallMgr.removeRule(lb); @@ -1667,7 +1666,7 @@ public List doInTransaction(TransactionStatus status) { // Bug CS-15411 opened to document this // _elbMgr.handleDeleteLoadBalancerRule(lb, callerUserId, caller); - logger.debug("Load balancer with id " + lb.getId() + " is removed successfully"); + logger.debug("Load balancer {} is removed successfully", lb); return true; } @@ -1740,7 +1739,7 @@ public LoadBalancer createPublicLoadBalancerRule(String xId, String name, String // set networkId just for verification purposes _networkModel.checkIpForService(ipVO, Service.Lb, networkId); - logger.debug("The ip is not associated with the VPC network id=" + networkId + " so assigning"); + logger.debug("The ip is not associated with the VPC network={} so assigning", network); ipVO = _ipAddrMgr.associateIPToGuestNetwork(ipAddrId, networkId, false); performedIpAssoc = true; } @@ -1772,7 +1771,7 @@ public LoadBalancer createPublicLoadBalancerRule(String xId, String name, String // release ip address if ipassoc was perfored if (performedIpAssoc) { ipVO = _ipAddressDao.findById(ipVO.getId()); - _vpcMgr.unassignIPFromVpcNetwork(ipVO.getId(), networkId); + _vpcMgr.unassignIPFromVpcNetwork(ipVO, network); } } @@ -1887,8 +1886,7 @@ public LoadBalancer createPublicLoadBalancer(final String xId, final String name if (!_firewallDao.setStateToAdd(newRule)) { throw new CloudRuntimeException("Unable to update the state to add for " + newRule); } - logger.debug("Load balancer " + newRule.getId() + " for Ip address id=" + sourceIpId + ", public port " + srcPort + ", private port " + destPort + - " is added successfully."); + logger.debug("Load balancer {} for Ip address: {}, public port {}, private port {} is added successfully.", newRule, ipAddr, srcPort, destPort); CallContext.current().setEventDetails("Load balancer Id: " + newRule.getId()); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_LOAD_BALANCER_CREATE, ipAddr.getAllocatedToAccountId(), 
ipAddr.getDataCenterId(), newRule.getId(), null, LoadBalancingRule.class.getName(), newRule.getUuid()); @@ -1944,10 +1942,10 @@ public boolean applyLoadBalancerConfig(long lbRuleId) throws ResourceUnavailable } @Override - public boolean revokeLoadBalancersForNetwork(long networkId, Scheme scheme) throws ResourceUnavailableException { - List lbs = _lbDao.listByNetworkIdAndScheme(networkId, scheme); + public boolean revokeLoadBalancersForNetwork(Network network, Scheme scheme) throws ResourceUnavailableException { + List lbs = _lbDao.listByNetworkIdAndScheme(network.getId(), scheme); if (logger.isDebugEnabled()) { - logger.debug("Revoking " + lbs.size() + " " + scheme + " load balancing rules for network id=" + networkId); + logger.debug("Revoking {} {} load balancing rules for network {}", lbs.size(), scheme, network); } if (lbs != null) { for (LoadBalancerVO lb : lbs) { // called during restart, not persisting state in db @@ -1955,19 +1953,19 @@ public boolean revokeLoadBalancersForNetwork(long networkId, Scheme scheme) thro } return applyLoadBalancerRules(lbs, false); // called during restart, not persisting state in db } else { - logger.info("Network id=" + networkId + " doesn't have load balancer rules, nothing to revoke"); + logger.info("Network {} doesn't have load balancer rules, nothing to revoke", network); return true; } } @Override - public boolean applyLoadBalancersForNetwork(long networkId, Scheme scheme) throws ResourceUnavailableException { - List lbs = _lbDao.listByNetworkIdAndScheme(networkId, scheme); + public boolean applyLoadBalancersForNetwork(Network network, Scheme scheme) throws ResourceUnavailableException { + List lbs = _lbDao.listByNetworkIdAndScheme(network.getId(), scheme); if (lbs != null) { - logger.debug("Applying load balancer rules of scheme " + scheme + " in network id=" + networkId); + logger.debug("Applying load balancer rules of scheme {} in network {}", scheme, network); return applyLoadBalancerRules(lbs, true); } else { - 
logger.info("Network id=" + networkId + " doesn't have load balancer rules of scheme " + scheme + ", nothing to apply"); + logger.info("Network {} doesn't have load balancer rules of scheme {}, nothing to apply", network, scheme); return true; } } @@ -2030,11 +2028,11 @@ public Boolean doInTransaction(TransactionStatus status) { if (lb.getState() == FirewallRule.State.Revoke) { removeLBRule(lb); - logger.debug("LB " + lb.getId() + " is successfully removed"); + logger.debug("LB {} is successfully removed", lb); checkForReleaseElasticIp = true; } else if (lb.getState() == FirewallRule.State.Add) { lb.setState(FirewallRule.State.Active); - logger.debug("LB rule " + lb.getId() + " state is set to Active"); + logger.debug("LB rule {} state is set to Active", lb); _lbDao.persist(lb); } @@ -2045,22 +2043,22 @@ public Boolean doInTransaction(TransactionStatus status) { for (LoadBalancerVMMapVO lbVmMap : lbVmMaps) { instanceIds.add(lbVmMap.getInstanceId()); _lb2VmMapDao.remove(lb.getId(), lbVmMap.getInstanceId(), lbVmMap.getInstanceIp(), null); - logger.debug("Load balancer rule id " + lb.getId() + " is removed for vm " + - lbVmMap.getInstanceId() + " instance ip " + lbVmMap.getInstanceIp()); + logger.debug("Load balancer rule {} is removed for vm {} instance ip {}", + lb, lbVmMap.getInstanceId(), lbVmMap.getInstanceIp());; } if (_lb2VmMapDao.listByLoadBalancerId(lb.getId()).isEmpty()) { lb.setState(FirewallRule.State.Add); _lbDao.persist(lb); - logger.debug("LB rule " + lb.getId() + " state is set to Add as there are no more active LB-VM mappings"); + logger.debug("LB rule {} state is set to Add as there are no more active LB-VM mappings", lb); } // remove LB-Stickiness policy mapping that were state to revoke List stickinesspolicies = _lb2stickinesspoliciesDao.listByLoadBalancerId(lb.getId(), true); if (!stickinesspolicies.isEmpty()) { _lb2stickinesspoliciesDao.remove(lb.getId(), true); - logger.debug("Load balancer rule id " + lb.getId() + " is removed stickiness 
policies"); + logger.debug("Load balancer rule {} is removed stickiness policies", lb); } // remove LB-HealthCheck policy mapping that were state to @@ -2068,13 +2066,13 @@ public Boolean doInTransaction(TransactionStatus status) { List healthCheckpolicies = _lb2healthcheckDao.listByLoadBalancerId(lb.getId(), true); if (!healthCheckpolicies.isEmpty()) { _lb2healthcheckDao.remove(lb.getId(), true); - logger.debug("Load balancer rule id " + lb.getId() + " is removed health check monitors policies"); + logger.debug("Load balancer rule {} is removed health check monitors policies", lb); } LoadBalancerCertMapVO lbCertMap = _lbCertMapDao.findByLbRuleId(lb.getId()); if (lbCertMap != null && lbCertMap.isRevoke()) { _lbCertMapDao.remove(lbCertMap.getId()); - logger.debug("Load balancer rule id " + lb.getId() + " removed certificate mapping"); + logger.debug("Load balancer rule {} removed certificate mapping", lb); } return checkForReleaseElasticIp; @@ -2100,8 +2098,7 @@ public Boolean doInTransaction(TransactionStatus status) { // if the rule is the last one for the ip address assigned to // VPC, unassign it from the network if (lb.getSourceIpAddressId() != null) { - IpAddress ip = _ipAddressDao.findById(lb.getSourceIpAddressId()); - _vpcMgr.unassignIPFromVpcNetwork(ip.getId(), lb.getNetworkId()); + _vpcMgr.unassignIPFromVpcNetwork(lb.getSourceIpAddressId(), lb.getNetworkId()); } } } @@ -2113,12 +2110,12 @@ protected boolean handleSystemLBIpRelease(LoadBalancerVO lb) { IpAddress ip = _ipAddressDao.findById(lb.getSourceIpAddressId()); boolean success = true; if (ip.getSystem()) { - logger.debug("Releasing system ip address " + lb.getSourceIpAddressId() + " as a part of delete lb rule"); - if (!_ipAddrMgr.disassociatePublicIpAddress(lb.getSourceIpAddressId(), CallContext.current().getCallingUserId(), CallContext.current().getCallingAccount())) { - logger.warn("Unable to release system ip address id=" + lb.getSourceIpAddressId() + " as a part of delete lb rule"); + 
logger.debug("Releasing system ip address {} as a part of delete lb rule", ip); + if (!_ipAddrMgr.disassociatePublicIpAddress(ip, CallContext.current().getCallingUserId(), CallContext.current().getCallingAccount())) { + logger.warn("Unable to release system ip address={} as a part of delete lb rule", ip); success = false; } else { - logger.warn("Successfully released system ip address id=" + lb.getSourceIpAddressId() + " as a part of delete lb rule"); + logger.warn("Successfully released system ip address={} as a part of delete lb rule", ip); } } return success; @@ -2135,7 +2132,7 @@ public boolean removeAllLoadBalanacersForIp(long ipId, Account caller, long call for (FirewallRule rule : rules) { boolean result = deleteLoadBalancerRule(rule.getId(), true, caller, callerUserId, false); if (result == false) { - logger.warn("Unable to remove load balancer rule " + rule.getId()); + logger.warn("Unable to remove load balancer rule {}", rule); return false; } } @@ -2151,7 +2148,7 @@ public boolean removeAllLoadBalanacersForNetwork(long networkId, Account caller, for (FirewallRule rule : rules) { boolean result = deleteLoadBalancerRule(rule.getId(), true, caller, callerUserId, false); if (result == false) { - logger.warn("Unable to remove load balancer rule " + rule.getId()); + logger.warn("Unable to remove load balancer rule {}", rule); return false; } } @@ -2251,7 +2248,7 @@ public LoadBalancer updateLoadBalancerRule(UpdateLoadBalancerRuleCmd cmd) { // Validate rule in LB provider LoadBalancingRule rule = getLoadBalancerRuleToApply(lb); if (!validateLbRule(rule)) { - throw new InvalidParameterValueException("Modifications in lb rule " + lbRuleId + " are not supported."); + throw new InvalidParameterValueException(String.format("Modifications in lb rule %s are not supported.", lb)); } LoadBalancerVO tmplbVo = _lbDao.findById(lbRuleId); @@ -2283,7 +2280,7 @@ public LoadBalancer updateLoadBalancerRule(UpdateLoadBalancerRuleCmd cmd) { _lbDao.update(lb.getId(), lb); 
_lbDao.persist(lb); - logger.debug("LB Rollback rule id: " + lbRuleId + " while updating LB rule."); + logger.debug("LB Rollback rule: {} while updating LB rule.", lb); } logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); success = false; @@ -2291,7 +2288,7 @@ public LoadBalancer updateLoadBalancerRule(UpdateLoadBalancerRuleCmd cmd) { } if (!success) { - throw new CloudRuntimeException("Failed to update load balancer rule: " + lbRuleId); + throw new CloudRuntimeException(String.format("Failed to update load balancer rule: %s", lb)); } return lb; @@ -2691,7 +2688,7 @@ public StickinessPolicy updateLBStickinessPolicy(long id, String customId, Boole LoadBalancerVO loadBalancer = _lbDao.findById(Long.valueOf(policy.getLoadBalancerId())); if (loadBalancer == null) { - throw new InvalidParameterValueException("Invalid Load balancer : " + policy.getLoadBalancerId() + " for Stickiness policy id: " + id); + throw new InvalidParameterValueException(String.format("Invalid Load balancer: %d for Stickiness policy: %s", policy.getLoadBalancerId(), policy)); } _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, loadBalancer); @@ -2718,7 +2715,7 @@ public HealthCheckPolicy updateLBHealthCheckPolicy(long id, String customId, Boo LoadBalancerVO loadBalancer = _lbDao.findById(Long.valueOf(policy.getLoadBalancerId())); if (loadBalancer == null) { - throw new InvalidParameterValueException("Invalid Load balancer : " + policy.getLoadBalancerId() + " for Stickiness policy id: " + id); + throw new InvalidParameterValueException(String.format("Invalid Load balancer : %d for Stickiness policy: %s", policy.getLoadBalancerId(), policy)); } _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, loadBalancer); diff --git a/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java b/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java index 0c21e4559ed4..18ce55aa328b 100644 
--- a/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java +++ b/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java @@ -1368,7 +1368,7 @@ protected String getGuestDhcpRange(final NicProfile guestNic, final Network gues private void setIpAddressNetworkParams(IpAddressTO ipAddress, final Network network, final VirtualRouter router) { if (_networkModel.isPrivateGateway(network.getId())) { - logger.debug("network " + network.getId() + " (name: " + network.getName() + " ) is a vpc private gateway, set traffic type to Public"); + logger.debug("network (id: {}, uuid: {}, name: {}) is a vpc private gateway, set traffic type to Public", network.getId(), network.getUuid(), network.getName()); ipAddress.setTrafficType(TrafficType.Public); ipAddress.setPrivateGateway(true); } else { diff --git a/server/src/main/java/com/cloud/network/router/NetworkHelperImpl.java b/server/src/main/java/com/cloud/network/router/NetworkHelperImpl.java index 1f4642bbd854..f33a6c2f6322 100644 --- a/server/src/main/java/com/cloud/network/router/NetworkHelperImpl.java +++ b/server/src/main/java/com/cloud/network/router/NetworkHelperImpl.java @@ -192,8 +192,8 @@ protected void setupHypervisorsMap() { @Override public boolean sendCommandsToRouter(final VirtualRouter router, final Commands cmds) throws AgentUnavailableException, ResourceUnavailableException { if (!checkRouterVersion(router)) { - logger.debug("Router requires upgrade. Unable to send command to router:" + router.getId() + ", router template version : " + router.getTemplateVersion() - + ", minimal required version : " + NetworkOrchestrationService.MinVRVersion.valueIn(router.getDataCenterId())); + logger.debug("Router requires upgrade. 
Unable to send command to router: {}, router template version: {}, minimal required version: {}", + router, router.getTemplateVersion(), NetworkOrchestrationService.MinVRVersion.valueIn(router.getDataCenterId())); throw new ResourceUnavailableException("Unable to send command. Router requires upgrade", VirtualRouter.class, router.getId()); } Answer[] answers = null; @@ -242,10 +242,10 @@ public void handleSingleWorkingRedundantRouter(final List> configureGuestNic(fina && _ipAddressDao.findByIpAndSourceNetworkId(guestNetwork.getId(), startIp).getAllocatedTime() == null) { defaultNetworkStartIp = startIp; } else if (logger.isDebugEnabled()) { - logger.debug("First ipv4 " + startIp + " in network id=" + guestNetwork.getId() - + " is already allocated, can't use it for domain router; will get random ip address from the range"); + logger.debug("First ipv4 {} in network {} is already allocated, " + + "can't use it for domain router; will get random ip " + + "address from the range", startIp, guestNetwork); } } } @@ -812,8 +810,9 @@ public LinkedHashMap> configureGuestNic(fina if (startIpv6 != null && _ipv6Dao.findByNetworkIdAndIp(guestNetwork.getId(), startIpv6) == null) { defaultNetworkStartIpv6 = startIpv6; } else if (logger.isDebugEnabled()) { - logger.debug("First ipv6 " + startIpv6 + " in network id=" + guestNetwork.getId() - + " is already allocated, can't use it for domain router; will get random ipv6 address from the range"); + logger.debug("First ipv6 {} in network {} is already allocated, " + + "can't use it for domain router; will get random ipv6 " + + "address from the range", startIpv6, guestNetwork); } } } @@ -901,10 +900,10 @@ public boolean validateHAProxyLBRule(final LoadBalancingRule rule) { } } if (expire != null && !containsOnlyNumbers(expire, timeEndChar)) { - throw new InvalidParameterValueException("Failed LB in validation rule id: " + rule.getId() + " Cause: expire is not in timeformat: " + expire); + throw new 
InvalidParameterValueException(String.format("Failed LB in validation rule: %s Cause: expire is not in timeformat: %s", rule.getLb(), expire)); } if (tablesize != null && !containsOnlyNumbers(tablesize, "kmg")) { - throw new InvalidParameterValueException("Failed LB in validation rule id: " + rule.getId() + " Cause: tablesize is not in size format: " + tablesize); + throw new InvalidParameterValueException(String.format("Failed LB in validation rule: %s Cause: tablesize is not in size format: %s", rule.getLb(), tablesize)); } } else if (LbStickinessMethod.StickinessMethodType.AppCookieBased.getName().equalsIgnoreCase(stickinessPolicy.getMethodName())) { @@ -923,10 +922,10 @@ public boolean validateHAProxyLBRule(final LoadBalancingRule rule) { } if (length != null && !containsOnlyNumbers(length, null)) { - throw new InvalidParameterValueException("Failed LB in validation rule id: " + rule.getId() + " Cause: length is not a number: " + length); + throw new InvalidParameterValueException(String.format("Failed LB in validation rule id: %s Cause: length is not a number: %s", rule.getLb(), length)); } if (holdTime != null && !containsOnlyNumbers(holdTime, timeEndChar) && !containsOnlyNumbers(holdTime, null)) { - throw new InvalidParameterValueException("Failed LB in validation rule id: " + rule.getId() + " Cause: holdtime is not in timeformat: " + holdTime); + throw new InvalidParameterValueException(String.format("Failed LB in validation rule id: %s Cause: holdtime is not in timeformat: %s", rule.getLb(), holdTime)); } } } diff --git a/server/src/main/java/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java b/server/src/main/java/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java index bb517eed524d..66b5cf6c8b6e 100644 --- a/server/src/main/java/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java @@ -638,7 +638,7 @@ protected 
void finalizeNetworkRulesForNetwork(final Commands cmds, final DomainR if (_networkModel.isProviderSupportServiceInNetwork(guestNetworkId, Service.NetworkACL, Provider.VPCVirtualRouter)) { final List networkACLs = _networkACLMgr.listNetworkACLItems(guestNetworkId); if (networkACLs != null && !networkACLs.isEmpty()) { - logger.debug("Found " + networkACLs.size() + " network ACLs to apply as a part of VPC VR " + domainRouterVO + " start for guest network id=" + guestNetworkId); + logger.debug("Found {} network ACLs to apply as a part of VPC VR {} start for guest network {}", networkACLs.size(), domainRouterVO, _networkModel.getNetwork(guestNetworkId)); _commandSetupHelper.createNetworkACLsCommands(networkACLs, domainRouterVO, cmds, guestNetworkId, false); } } @@ -920,18 +920,18 @@ public boolean startRemoteAccessVpn(final RemoteAccessVpn vpn, final VirtualRout Answer answer = cmds.getAnswer("users"); if (answer == null || !answer.getResult()) { String errorMessage = (answer == null) ? "null answer object" : answer.getDetails(); - logger.error("Unable to start vpn: unable add users to vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " - + router.getInstanceName() + " due to " + errorMessage); - throw new ResourceUnavailableException("Unable to start vpn: Unable to add users to vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() - + " on domR: " + router.getInstanceName() + " due to " + errorMessage, DataCenter.class, router.getDataCenterId()); + DataCenter zone = _entityMgr.findById(DataCenter.class, router.getDataCenterId()); + Account account = _entityMgr.findById(Account.class, vpn.getAccountId()); + logger.error("Unable to start vpn: unable add users to vpn in zone {} for account {} on domR: {} due to {}", zone, account, router, errorMessage); + throw new ResourceUnavailableException(String.format("Unable to start vpn: Unable to add users to vpn in zone %s for account %s on domR: %s due to 
%s", zone, account, router.getInstanceName(), errorMessage), DataCenter.class, router.getDataCenterId()); } answer = cmds.getAnswer("startVpn"); if (answer == null || !answer.getResult()) { String errorMessage = (answer == null) ? "null answer object" : answer.getDetails(); - logger.error("Unable to start vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + " due to " - + errorMessage); - throw new ResourceUnavailableException("Unable to start vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " - + router.getInstanceName() + " due to " + errorMessage, DataCenter.class, router.getDataCenterId()); + DataCenter zone = _entityMgr.findById(DataCenter.class, router.getDataCenterId()); + Account account = _entityMgr.findById(Account.class, vpn.getAccountId()); + logger.error("Unable to start vpn in zone {} for account {} on domR: {} due to {}", zone, account, router, errorMessage); + throw new ResourceUnavailableException(String.format("Unable to start vpn in zone %s for account %s on domR: %s due to %s", zone, account, router.getInstanceName(), errorMessage), DataCenter.class, router.getDataCenterId()); } return true; diff --git a/server/src/main/java/com/cloud/network/rules/DhcpSubNetRules.java b/server/src/main/java/com/cloud/network/rules/DhcpSubNetRules.java index ccf8f1884712..b406d9623a22 100644 --- a/server/src/main/java/com/cloud/network/rules/DhcpSubNetRules.java +++ b/server/src/main/java/com/cloud/network/rules/DhcpSubNetRules.java @@ -131,7 +131,7 @@ public boolean accept(final NetworkTopologyVisitor visitor, final VirtualRouter } } catch (final InsufficientAddressCapacityException e) { logger.info(e.getMessage()); - logger.info("unable to configure dhcp for this VM."); + logger.info("unable to configure dhcp for this VM {}", vm); return false; } // this means we did not create an IP alias on the router. 
diff --git a/server/src/main/java/com/cloud/network/rules/PrivateGatewayRules.java b/server/src/main/java/com/cloud/network/rules/PrivateGatewayRules.java index bb66839fb134..1b827b384d0f 100644 --- a/server/src/main/java/com/cloud/network/rules/PrivateGatewayRules.java +++ b/server/src/main/java/com/cloud/network/rules/PrivateGatewayRules.java @@ -60,7 +60,7 @@ public boolean accept(final NetworkTopologyVisitor visitor, final VirtualRouter final NetworkHelper networkHelper = visitor.getVirtualNetworkApplianceFactory().getNetworkHelper(); if (!networkHelper.checkRouterVersion(_router)) { - logger.warn("Router requires upgrade. Unable to send command to router: " + _router.getId()); + logger.warn("Router requires upgrade. Unable to send command to router: {}", _router); return false; } final VirtualMachineManager itMgr = visitor.getVirtualNetworkApplianceFactory().getItMgr(); diff --git a/server/src/main/java/com/cloud/network/rules/RulesManagerImpl.java b/server/src/main/java/com/cloud/network/rules/RulesManagerImpl.java index 404c1c88f5a9..575694218aab 100644 --- a/server/src/main/java/com/cloud/network/rules/RulesManagerImpl.java +++ b/server/src/main/java/com/cloud/network/rules/RulesManagerImpl.java @@ -167,7 +167,7 @@ protected void checkIpAndUserVm(IpAddress ipAddress, UserVm userVm, Account call if (userVm.getState() == VirtualMachine.State.Destroyed || userVm.getState() == VirtualMachine.State.Expunging) { if (!ignoreVmState) { - throw new InvalidParameterValueException("Invalid user vm: " + userVm.getId()); + throw new InvalidParameterValueException(String.format("Invalid user vm: %s", userVm)); } } @@ -193,7 +193,7 @@ public void checkRuleAndUserVm(FirewallRule rule, UserVm userVm, Account caller) _accountMgr.checkAccess(caller, null, false, rule, userVm); if (userVm.getState() == VirtualMachine.State.Destroyed || userVm.getState() == VirtualMachine.State.Expunging) { - throw new InvalidParameterValueException("Invalid user vm: " + userVm.getId()); + 
throw new InvalidParameterValueException(String.format("Invalid user vm: %s", userVm)); } } @@ -214,7 +214,7 @@ public PortForwardingRule createPortForwardingRule(final PortForwardingRule rule if (ipAddress == null) { throw new InvalidParameterValueException("Unable to create port forwarding rule; ip id=" + ipAddrId + " doesn't exist in the system"); } else if (ipAddress.isOneToOneNat()) { - throw new InvalidParameterValueException("Unable to create port forwarding rule; ip id=" + ipAddrId + " has static nat enabled"); + throw new InvalidParameterValueException(String.format("Unable to create port forwarding rule; ip %s has static nat enabled", ipAddress)); } final Long networkId = rule.getNetworkId(); @@ -227,7 +227,7 @@ public PortForwardingRule createPortForwardingRule(final PortForwardingRule rule if (assignToVpcNtwk) { _networkModel.checkIpForService(ipAddress, Service.PortForwarding, networkId); - logger.debug("The ip is not associated with the VPC network id=" + networkId + ", so assigning"); + logger.debug("The ip is not associated with the VPC network {}, so assigning", network); try { ipAddress = _ipAddrMgr.associateIPToGuestNetwork(ipAddrId, networkId, false); performedIpAssoc = true; @@ -267,7 +267,7 @@ public PortForwardingRule createPortForwardingRule(final PortForwardingRule rule throw new InvalidParameterValueException("Unable to create port forwarding rule on address " + ipAddress + ", invalid virtual machine id specified (" + vmId + ")."); } else if (vm.getState() == VirtualMachine.State.Destroyed || vm.getState() == VirtualMachine.State.Expunging) { - throw new InvalidParameterValueException("Invalid user vm: " + vm.getId()); + throw new InvalidParameterValueException(String.format("Invalid user vm: %s", vm)); } // Verify that vm has nic in the network @@ -345,15 +345,15 @@ public PortForwardingRule createPortForwardingRule(final PortForwardingRule rule throw (NetworkRuleConflictException)e; } - throw new CloudRuntimeException("Unable to add rule 
for the ip id=" + ipAddrId, e); + throw new CloudRuntimeException(String.format("Unable to add rule for the ip %s", ipAddressFinal), e); } }); } finally { // release ip address if ipassoc was perfored if (performedIpAssoc) { //if the rule is the last one for the ip address assigned to VPC, unassign it from the network - IpAddress ip = _ipAddressDao.findById(ipAddress.getId()); - _vpcMgr.unassignIPFromVpcNetwork(ip.getId(), networkId); + IPAddressVO ip = _ipAddressDao.findById(ipAddress.getId()); + _vpcMgr.unassignIPFromVpcNetwork(ip, network); } } } finally { @@ -462,9 +462,9 @@ private boolean enableStaticNat(long ipId, long vmId, long networkId, boolean is Nic guestNic; NicSecondaryIpVO nicSecIp = null; String dstIp = null; + Network network = _networkModel.getNetwork(networkId); try { - Network network = _networkModel.getNetwork(networkId); if (network == null) { throw new InvalidParameterValueException("Unable to find network by id"); } @@ -483,8 +483,7 @@ private boolean enableStaticNat(long ipId, long vmId, long networkId, boolean is if (!isSystemVm) { UserVmVO vm = _vmDao.findById(vmId); if (vm == null) { - throw new InvalidParameterValueException("Can't enable static nat for the address id=" + ipId + ", invalid virtual machine id specified (" + vmId + - ")."); + throw new InvalidParameterValueException(String.format("Can't enable static nat for the address %s, invalid virtual machine id specified (%d).", ipAddress, vmId)); } //associate ip address to network (if needed) if (ipAddress.getAssociatedWithNetworkId() == null) { @@ -492,17 +491,16 @@ private boolean enableStaticNat(long ipId, long vmId, long networkId, boolean is if (assignToVpcNtwk) { _networkModel.checkIpForService(ipAddress, Service.StaticNat, networkId); - logger.debug("The ip is not associated with the VPC network id=" + networkId + ", so assigning"); + logger.debug("The ip is not associated with the VPC network {}, so assigning", network); try { ipAddress = 
_ipAddrMgr.associateIPToGuestNetwork(ipId, networkId, false); performedIpAssoc = true; } catch (Exception ex) { - logger.warn("Failed to associate ip id=" + ipId + " to VPC network id=" + networkId + " as " + "a part of enable static nat"); + logger.warn("Failed to associate ip {} to VPC network {} as a part of enable static nat", ipAddress, network); return false; } } else if (ipAddress.isPortable()) { - logger.info("Portable IP " + ipAddress.getUuid() + " is not associated with the network yet " + " so associate IP with the network " + - networkId); + logger.info("Portable IP {} is not associated with the network yet so associate IP with the network {}", ipAddress, network); try { // check if StaticNat service is enabled in the network _networkModel.checkIpForService(ipAddress, Service.StaticNat, networkId); @@ -515,7 +513,7 @@ private boolean enableStaticNat(long ipId, long vmId, long networkId, boolean is // associate portable IP with guest network ipAddress = _ipAddrMgr.associatePortableIPToGuestNetwork(ipId, networkId, false); } catch (Exception e) { - logger.warn("Failed to associate portable id=" + ipId + " to network id=" + networkId + " as " + "a part of enable static nat"); + logger.warn("Failed to associate portable {} to network {} as a part of enable static nat", ipAddress, network); return false; } } @@ -531,16 +529,15 @@ private boolean enableStaticNat(long ipId, long vmId, long networkId, boolean is _ipAddrMgr.transferPortableIP(ipId, ipAddress.getAssociatedWithNetworkId(), networkId); ipAddress = _ipAddressDao.findById(ipId); } catch (Exception e) { - logger.warn("Failed to associate portable id=" + ipId + " to network id=" + networkId + " as " + "a part of enable static nat"); + logger.warn("Failed to associate portable {} to network {} as a part of enable static nat", ipAddress, network); return false; } } else { - throw new InvalidParameterValueException("Portable IP: " + ipId + " has associated services " + "in network " + - 
ipAddress.getAssociatedWithNetworkId() + " so can not be transferred to " + " network " + networkId); + throw new InvalidParameterValueException(String.format("Portable IP: %s has associated services in network %s so can not be transferred to network %s", + ipAddress, _networkModel.getNetwork(ipAddress.getAssociatedWithNetworkId()), network)); } } else { - throw new InvalidParameterValueException("Invalid network Id=" + networkId + ". IP is associated with" + - " a different network than passed network id"); + throw new InvalidParameterValueException(String.format("Invalid network %s. IP is associated with a different network than passed network id", network)); } } else { _networkModel.checkIpForService(ipAddress, Service.StaticNat, null); @@ -592,13 +589,14 @@ private boolean enableStaticNat(long ipId, long vmId, long networkId, boolean is ipAddress.setVmIp(dstIp); if (_ipAddressDao.update(ipAddress.getId(), ipAddress)) { // enable static nat on the backend - logger.trace("Enabling static nat for ip address " + ipAddress + " and vm id=" + vmId + " on the backend"); + logger.trace("Enabling static nat for ip address {} and vm {} on the backend", + ipAddress::toString, () -> _vmInstanceDao.findById(vmId)); if (applyStaticNatForIp(ipId, false, caller, false)) { applyUserDataIfNeeded(vmId, network, guestNic); performedIpAssoc = false; // ignor unassignIPFromVpcNetwork in finally block return true; } else { - logger.warn("Failed to enable static nat rule for ip address " + ipId + " on the backend"); + logger.warn("Failed to enable static nat rule for ip address {} on the backend", ipAddress); ipAddress.setOneToOneNat(isOneToOneNat); ipAddress.setAssociatedWithVmId(associatedWithVmId); ipAddress.setVmIp(null); @@ -609,10 +607,10 @@ private boolean enableStaticNat(long ipId, long vmId, long networkId, boolean is } } finally { - if (performedIpAssoc) { - //if the rule is the last one for the ip address assigned to VPC, unassign it from the network - IpAddress ip = 
_ipAddressDao.findById(ipAddress.getId()); - _vpcMgr.unassignIPFromVpcNetwork(ip.getId(), networkId); + if (performedIpAssoc) { + //if the rule is the last one for the ip address assigned to VPC, unassign it from the network + IPAddressVO ip = _ipAddressDao.findById(ipAddress.getId()); + _vpcMgr.unassignIPFromVpcNetwork(ip, network); } } return false; @@ -782,7 +780,7 @@ public boolean revokePortForwardingRulesForVm(long vmId) { Set ipsToReprogram = new HashSet(); if (rules == null || rules.isEmpty()) { - logger.debug("No port forwarding rules are found for vm id=" + vmId); + logger.debug("No port forwarding rules are found for vm {}", vm); return true; } @@ -1159,7 +1157,7 @@ public boolean revokeAllPFAndStaticNatRulesForIp(long ipId, long userId, Account rules.addAll(_firewallDao.listByIpAndPurposeAndNotRevoked(ipId, Purpose.StaticNat)); if (logger.isDebugEnabled() && success) { - logger.debug("Successfully released rules for ip id=" + ipId + " and # of rules now = " + rules.size()); + logger.debug("Successfully released rules for ip {} and # of rules now = {}", ipAddress, rules.size()); } return (rules.size() == 0 && success); @@ -1318,13 +1316,13 @@ public boolean disableStaticNat(long ipId, Account caller, long callerUserId, bo // Revoke all firewall rules for the ip try { - logger.debug("Revoking all " + Purpose.Firewall + "rules as a part of disabling static nat for public IP id=" + ipId); - if (!_firewallMgr.revokeFirewallRulesForIp(ipId, callerUserId, caller)) { - logger.warn("Unable to revoke all the firewall rules for ip id=" + ipId + " as a part of disable statis nat"); + logger.debug(String.format("Revoking all %s rules as a part of disabling static nat for public IP %s", Purpose.Firewall, ipAddress)); + if (!_firewallMgr.revokeFirewallRulesForIp(ipAddress, callerUserId, caller)) { + logger.warn("Unable to revoke all the firewall rules for ip {} as a part of disable statis nat", ipAddress); success = false; } } catch (ResourceUnavailableException e) { 
- logger.warn("Unable to revoke all firewall rules for ip id=" + ipId + " as a part of ip release", e); + logger.warn("Unable to revoke all firewall rules for ip {} as a part of ip release", ipAddress, e); success = false; } @@ -1352,7 +1350,7 @@ public boolean disableStaticNat(long ipId, Account caller, long callerUserId, bo return true; } else { - logger.warn("Failed to disable one to one nat for the ip address id" + ipId); + logger.warn("Failed to disable one to one nat for the ip address {}", ipAddress); ipAddress = _ipAddressDao.findById(ipId); ipAddress.setRuleState(null); _ipAddressDao.update(ipAddress.getId(), ipAddress); @@ -1536,19 +1534,20 @@ protected void removePFRule(PortForwardingRuleVO rule) { @Override public List listAssociatedRulesForGuestNic(Nic nic) { - logger.debug("Checking if PF/StaticNat/LoadBalancer rules are configured for nic " + nic.getId()); + logger.debug("Checking if PF/StaticNat/LoadBalancer rules are configured for nic {}", nic); List result = new ArrayList(); // add PF rules result.addAll(_portForwardingDao.listByNetworkAndDestIpAddr(nic.getIPv4Address(), nic.getNetworkId())); if(result.size() > 0) { - logger.debug("Found " + result.size() + " portforwarding rule configured for the nic in the network " + nic.getNetworkId()); + logger.debug("Found {} portforwarding rule configured for the nic in the network {}", + result.size(), _networkModel.getNetwork(nic.getNetworkId())); } // add static NAT rules List staticNatRules = _firewallDao.listStaticNatByVmId(nic.getInstanceId()); for (FirewallRuleVO rule : staticNatRules) { if (rule.getNetworkId() == nic.getNetworkId()) { result.add(rule); - logger.debug("Found rule " + rule.getId() + " " + rule.getPurpose() + " configured"); + logger.debug("Found rule {} configured", rule); } } List staticNatIps = _ipAddressDao.listStaticNatPublicIps(nic.getNetworkId()); @@ -1561,7 +1560,7 @@ public List listAssociatedRulesForGuestNic(Nic nic) { new FirewallRuleVO(null, ip.getId(), 0, 65535, 
NetUtils.ALL_PROTO.toString(), nic.getNetworkId(), vm.getAccountId(), vm.getDomainId(), Purpose.StaticNat, null, null, null, null, null); result.add(staticNatRule); - logger.debug("Found rule " + staticNatRule.getId() + " " + staticNatRule.getPurpose() + " configured"); + logger.debug("Found rule {} configured", staticNatRule); } } // add LB rules @@ -1570,7 +1569,7 @@ public List listAssociatedRulesForGuestNic(Nic nic) { FirewallRuleVO lbRule = _firewallDao.findById(lb.getLoadBalancerId()); if (lbRule.getNetworkId() == nic.getNetworkId()) { result.add(lbRule); - logger.debug("Found rule " + lbRule.getId() + " " + lbRule.getPurpose() + " configured"); + logger.debug("Found rule {} configured", lbRule); } } return result; @@ -1663,10 +1662,10 @@ public PortForwardingRule updatePortForwardingRule(UpdatePortForwardingRuleCmd c rules.add(rule); try { if (!_firewallMgr.applyRules(rules, true, false)) { - throw new CloudRuntimeException("Failed to revoke the existing port forwarding rule:" + id); + throw new CloudRuntimeException(String.format("Failed to revoke the existing port forwarding rule:%s", rule)); } } catch (ResourceUnavailableException ex) { - throw new CloudRuntimeException("Failed to revoke the existing port forwarding rule:" + id + " due to ", ex); + throw new CloudRuntimeException(String.format("Failed to revoke the existing port forwarding rule:%s due to ", rule), ex); } rule = _portForwardingDao.findById(id); @@ -1692,7 +1691,7 @@ public PortForwardingRule updatePortForwardingRule(UpdatePortForwardingRuleCmd c //apply new rules if (!applyPortForwardingRules(rule.getSourceIpAddressId(), false, caller)) { - throw new CloudRuntimeException("Failed to apply the new port forwarding rule:" + id); + throw new CloudRuntimeException(String.format("Failed to apply the new port forwarding rule: %s", rule)); } return _portForwardingDao.findById(id); diff --git a/server/src/main/java/com/cloud/network/rules/VpcIpAssociationRules.java 
b/server/src/main/java/com/cloud/network/rules/VpcIpAssociationRules.java index c196a27bf327..5ae102acf43a 100644 --- a/server/src/main/java/com/cloud/network/rules/VpcIpAssociationRules.java +++ b/server/src/main/java/com/cloud/network/rules/VpcIpAssociationRules.java @@ -22,6 +22,7 @@ import java.util.List; import java.util.Map; +import com.cloud.network.dao.NetworkDao; import org.apache.cloudstack.network.topology.NetworkTopologyVisitor; import com.cloud.exception.ResourceUnavailableException; @@ -56,6 +57,7 @@ public boolean accept(final NetworkTopologyVisitor visitor, final VirtualRouter _ipsToSend = new ArrayList(); NicDao nicDao = visitor.getVirtualNetworkApplianceFactory().getNicDao(); + NetworkDao networkDao = visitor.getVirtualNetworkApplianceFactory().getNetworkDao(); for (PublicIpAddress ipAddr : _ipAddresses) { String broadcastURI = BroadcastDomainType.Vlan.toUri(ipAddr.getVlanTag()).toString(); Nic nic = nicDao.findByNetworkIdInstanceIdAndBroadcastUri(ipAddr.getNetworkId(), _router.getId(), broadcastURI); @@ -63,7 +65,7 @@ public boolean accept(final NetworkTopologyVisitor visitor, final VirtualRouter String macAddress = null; if (nic == null) { if (ipAddr.getState() != IpAddress.State.Releasing) { - throw new CloudRuntimeException("Unable to find the nic in network " + ipAddr.getNetworkId() + " to apply the ip address " + ipAddr + " for"); + throw new CloudRuntimeException(String.format("Unable to find the nic in network %s to apply the ip address %s for", networkDao.findById(ipAddr.getNetworkId()), ipAddr)); } logger.debug("Not sending release for ip address " + ipAddr + " as its nic is already gone from VPC router " + _router); } else { diff --git a/server/src/main/java/com/cloud/network/security/SecurityGroupManagerImpl.java b/server/src/main/java/com/cloud/network/security/SecurityGroupManagerImpl.java index fd5bd4480899..dc408602c934 100644 --- a/server/src/main/java/com/cloud/network/security/SecurityGroupManagerImpl.java +++ 
b/server/src/main/java/com/cloud/network/security/SecurityGroupManagerImpl.java @@ -37,6 +37,7 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.host.dao.HostDao; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.command.user.securitygroup.AuthorizeSecurityGroupEgressCmd; import org.apache.cloudstack.api.command.user.securitygroup.AuthorizeSecurityGroupIngressCmd; @@ -154,6 +155,8 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro @Inject VMInstanceDao _vmDao; @Inject + HostDao hostDao; + @Inject NetworkOrchestrationService _networkMgr; @Inject NetworkModel _networkModel; @@ -758,7 +761,7 @@ public List doInTransaction(TransactionStatus status) { // Prevents other threads/management servers from creating duplicate security rules SecurityGroup securityGroup = _securityGroupDao.acquireInLockTable(securityGroupId); if (securityGroup == null) { - logger.warn("Could not acquire lock on network security group: id= " + securityGroupId); + logger.warn("Could not acquire lock on network security group: id = {}", securityGroupId); return null; } List newRules = new ArrayList(); @@ -769,14 +772,14 @@ public List doInTransaction(TransactionStatus status) { if (ngVO.getId() != securityGroup.getId()) { final SecurityGroupVO tmpGrp = _securityGroupDao.lockRow(ngId, false); if (tmpGrp == null) { - logger.warn("Failed to acquire lock on security group: " + ngId); - throw new CloudRuntimeException("Failed to acquire lock on security group: " + ngId); + logger.warn("Failed to acquire lock on security group: {}", ngVO); + throw new CloudRuntimeException(String.format("Failed to acquire lock on security group: %s", ngVO)); } } SecurityGroupRuleVO securityGroupRule = _securityGroupRuleDao.findByProtoPortsAndAllowedGroupId(securityGroup.getId(), protocolFinal, startPortOrTypeFinal, endPortOrCodeFinal, ngVO.getId()); if ((securityGroupRule != null) &&
(securityGroupRule.getRuleType() == ruleType)) { - logger.warn("The rule already exists. id= " + securityGroupRule.getUuid()); + logger.warn("The rule {} already exists.", securityGroupRule); continue; // rule already exists. } securityGroupRule = new SecurityGroupRuleVO(ruleType, securityGroup.getId(), startPortOrTypeFinal, endPortOrCodeFinal, protocolFinal, ngVO.getId()); @@ -796,7 +799,7 @@ public List doInTransaction(TransactionStatus status) { } } if (logger.isDebugEnabled()) { - logger.debug("Added " + newRules.size() + " rules to security group " + securityGroup.getName()); + logger.debug("Added {} rules to security group {}", newRules.size(), securityGroup); } return newRules; } catch (Exception e) { @@ -852,8 +855,8 @@ private boolean revokeSecurityGroupRule(final Long id, SecurityRuleType type) { // check type if (type != rule.getRuleType()) { - logger.debug("Mismatch in rule type for security rule with id " + id); - throw new InvalidParameterValueException("Mismatch in rule type for security rule with id " + id); + logger.debug("Mismatch in rule type for security rule {}", rule); + throw new InvalidParameterValueException(String.format("Mismatch in rule type for security rule %s", rule)); } // Check permissions @@ -870,12 +873,12 @@ public Boolean doInTransaction(TransactionStatus status) { // acquire lock on parent group (preserving this logic) groupHandle = _securityGroupDao.acquireInLockTable(rule.getSecurityGroupId()); if (groupHandle == null) { - logger.warn("Could not acquire lock on security group id: " + rule.getSecurityGroupId()); + logger.warn("Could not acquire lock on security group: {}", securityGroup); return false; } _securityGroupRuleDao.remove(id); - logger.debug("revokeSecurityGroupRule succeeded for security rule id: " + id); + logger.debug("revokeSecurityGroupRule succeeded for security rule: {}", rule); return true; } catch (Exception e) { @@ -928,9 +931,9 @@ public SecurityGroupVO createSecurityGroup(String name, String description, 
Long if (group == null) { group = new SecurityGroupVO(name, description, domainId, accountId); group = _securityGroupDao.persist(group); - logger.debug("Created security group " + group + " for account id=" + accountId); + logger.debug("Created security group {} for account [id: {}, name: {}]", group, accountId, accountName); } else { - logger.debug("Returning existing security group " + group + " for account id=" + accountId); + logger.debug("Returning existing security group {} for account [id: {}, name: {}]", group, accountId, accountName); } return group; @@ -1032,14 +1035,14 @@ public void doInTransactionWithoutResult(TransactionStatus status) { locked = true; return; } - logger.warn("Unable to acquire lock on vm id=" + userVmId); + logger.warn("Unable to acquire lock on vm {}", vm); return; } locked = true; Long agentId = null; VmRulesetLogVO log = _rulesetLogDao.findByVmId(userVmId); if (log == null) { - logger.warn("Cannot find log record for vm id=" + userVmId); + logger.warn("Cannot find log record for vm {}", vm); return; } seqnum = log.getLogsequence(); @@ -1066,7 +1069,9 @@ public void doInTransactionWithoutResult(TransactionStatus status) { try { _agentMgr.send(agentId, cmds, _answerListener); } catch (AgentUnavailableException e) { - logger.debug("Unable to send ingress rules updates for vm: " + userVmId + "(agentid=" + agentId + ")"); + Long finalAgentId = agentId; + logger.debug("Unable to send ingress rules updates for vm: {} (agent={})", + vm::toString, () -> hostDao.findByIdIncludingRemoved(finalAgentId)); _workDao.updateStep(work.getInstanceId(), seqnum, Step.Done); } @@ -1085,9 +1090,10 @@ public void doInTransactionWithoutResult(TransactionStatus status) { @Override @DB - public boolean addInstanceToGroups(final Long userVmId, final List groups) { + public boolean addInstanceToGroups(final UserVm userVm, final List groups) { + long userVmId = userVm.getId(); if (!isVmSecurityGroupEnabled(userVmId)) { - logger.trace("User vm " + userVmId + " 
is not security group enabled, not adding it to security group"); + logger.trace("User vm {} is not security group enabled, not adding it to security group", userVm); return false; } if (groups != null && !groups.isEmpty()) { @@ -1102,16 +1108,15 @@ public Boolean doInTransaction(TransactionStatus status) { final Set uniqueGroups = new TreeSet(new SecurityGroupVOComparator()); uniqueGroups.addAll(sgs); if (userVm == null) { - logger.warn("Failed to acquire lock on user vm id=" + userVmId); + logger.warn("Failed to acquire lock on user vm {}", userVm); } try { for (SecurityGroupVO securityGroup : uniqueGroups) { // don't let the group be deleted from under us. SecurityGroupVO ngrpLock = _securityGroupDao.lockRow(securityGroup.getId(), false); if (ngrpLock == null) { - logger.warn("Failed to acquire lock on network group id=" + securityGroup.getId() + " name=" + securityGroup.getName()); - throw new ConcurrentModificationException("Failed to acquire lock on network group id=" + securityGroup.getId() + " name=" - + securityGroup.getName()); + logger.warn("Failed to acquire lock on network group {}", securityGroup); + throw new ConcurrentModificationException(String.format("Failed to acquire lock on network group %s", securityGroup)); } if (_securityGroupVMMapDao.findByVmIdGroupId(userVmId, securityGroup.getId()) == null) { SecurityGroupVMMapVO groupVmMapVO = new SecurityGroupVMMapVO(securityGroup.getId(), userVmId); @@ -1133,9 +1138,10 @@ public Boolean doInTransaction(TransactionStatus status) { @Override @DB - public void removeInstanceFromGroups(final long userVmId) { + public void removeInstanceFromGroups(final UserVm vm) { + long userVmId = vm.getId(); if (_securityGroupVMMapDao.countSGForVm(userVmId) < 1) { - logger.trace("No security groups found for vm id=" + userVmId + ", returning"); + logger.trace("No security groups found for vm {}, returning", vm); return; } Transaction.execute(new TransactionCallbackNoReturn() { @@ -1144,14 +1150,14 @@ public void 
doInTransactionWithoutResult(TransactionStatus status) { UserVm userVm = _userVMDao.acquireInLockTable(userVmId); // ensures that duplicate entries are not created in // addInstance if (userVm == null) { - logger.warn("Failed to acquire lock on user vm id=" + userVmId); + logger.warn("Failed to acquire lock on user vm {}", vm); } int n = _securityGroupVMMapDao.deleteVM(userVmId); - logger.info("Disassociated " + n + " network groups " + " from uservm " + userVmId); + logger.info("Disassociated {} network groups from uservm {}", n, vm); _userVMDao.releaseFromLockTable(userVmId); } }); - logger.debug("Security group mappings are removed successfully for vm id=" + userVmId); + logger.debug("Security group mappings are removed successfully for vm {}", vm); } @DB @@ -1168,7 +1174,7 @@ public SecurityGroup updateSecurityGroup(UpdateSecurityGroupCmd cmd) { } if (newName == null) { - logger.debug("security group name is not changed. id=" + groupId); + logger.debug("security group [{}] name is not changed.", group); return group; } @@ -1188,7 +1194,7 @@ public SecurityGroupVO doInTransaction(TransactionStatus status) { } if (newName.equals(group.getName())) { - logger.debug("security group name is not changed. 
id=" + groupId); + logger.debug("security group [{}] name is not changed.", group); return group; } else if (newName.equalsIgnoreCase(SecurityGroupManager.DEFAULT_GROUP_NAME)) { throw new InvalidParameterValueException("The security group name " + SecurityGroupManager.DEFAULT_GROUP_NAME + " is reserved"); @@ -1201,7 +1207,7 @@ public SecurityGroupVO doInTransaction(TransactionStatus status) { group.setName(newName); _securityGroupDao.update(groupId, group); - logger.debug("Updated security group id=" + groupId); + logger.debug("Updated security group {}", group); return group; } @@ -1226,12 +1232,12 @@ public boolean deleteSecurityGroup(DeleteSecurityGroupCmd cmd) throws ResourceIn boolean result = Transaction.execute(new TransactionCallbackWithException() { @Override public Boolean doInTransaction(TransactionStatus status) throws ResourceInUseException { - SecurityGroupVO group = _securityGroupDao.lockRow(groupId, true); - if (group == null) { - throw new InvalidParameterValueException("Unable to find security group by id " + groupId); + SecurityGroupVO groupLock = _securityGroupDao.lockRow(groupId, true); + if (groupLock == null) { + throw new InvalidParameterValueException(String.format("Unable to get lock on security group %s", group)); } - if (group.getName().equalsIgnoreCase(SecurityGroupManager.DEFAULT_GROUP_NAME)) { + if (groupLock.getName().equalsIgnoreCase(SecurityGroupManager.DEFAULT_GROUP_NAME)) { throw new InvalidParameterValueException("The network group default is reserved"); } @@ -1245,7 +1251,7 @@ public Boolean doInTransaction(TransactionStatus status) throws ResourceInUseExc _securityGroupDao.expunge(groupId); - logger.debug("Deleted security group id=" + groupId); + logger.debug("Deleted security group {}", group); return true; } @@ -1362,17 +1368,17 @@ public boolean postStateTransitionEvent(StateMachine2.Transition t Event event = transition.getEvent(); if (VirtualMachine.State.isVmStarted(oldState, event, newState)) { if 
(logger.isTraceEnabled()) { - logger.trace("Security Group Mgr: handling start of vm id" + vm.getId()); + logger.trace("Security Group Mgr: handling start of vm {}", vm); } handleVmStarted((VMInstanceVO)vm); } else if (VirtualMachine.State.isVmStopped(oldState, event, newState)) { if (logger.isTraceEnabled()) { - logger.trace("Security Group Mgr: handling stop of vm id" + vm.getId()); + logger.trace("Security Group Mgr: handling stop of vm {}", vm); } handleVmStopped((VMInstanceVO)vm); } else if (VirtualMachine.State.isVmMigrated(oldState, event, newState)) { if (logger.isTraceEnabled()) { - logger.trace("Security Group Mgr: handling migration of vm id" + vm.getId()); + logger.trace("Security Group Mgr: handling migration of vm {}", vm); } handleVmMigrated((VMInstanceVO)vm); } @@ -1408,7 +1414,7 @@ public boolean isVmMappedToDefaultSecurityGroup(long vmId) { UserVmVO vm = _userVmMgr.getVirtualMachine(vmId); SecurityGroup defaultGroup = getDefaultSecurityGroup(vm.getAccountId()); if (defaultGroup == null) { - logger.warn("Unable to find default security group for account id=" + vm.getAccountId()); + logger.warn("Unable to find default security group for account {}", () -> _accountMgr.getAccount(vm.getAccountId())); return false; } SecurityGroupVMMapVO map = _securityGroupVMMapDao.findByVmIdGroupId(vmId, defaultGroup.getId()); diff --git a/server/src/main/java/com/cloud/network/security/SecurityGroupManagerImpl2.java b/server/src/main/java/com/cloud/network/security/SecurityGroupManagerImpl2.java index bd6f0e32bb02..230aa0ec2311 100644 --- a/server/src/main/java/com/cloud/network/security/SecurityGroupManagerImpl2.java +++ b/server/src/main/java/com/cloud/network/security/SecurityGroupManagerImpl2.java @@ -172,7 +172,7 @@ public void sendRulesetUpdates(SecurityGroupWork work) { if (vm != null && vm.getState() == State.Running) { if (logger.isTraceEnabled()) { - logger.trace("SecurityGroupManager v2: found vm, " + userVmId + " state=" + vm.getState()); + 
logger.trace("SecurityGroupManager v2: found vm {}, state={}", vm, vm.getState()); } Map> ingressRules = generateRulesForVM(userVmId, SecurityRuleType.IngressRule); Map> egressRules = generateRulesForVM(userVmId, SecurityRuleType.EgressRule); @@ -192,18 +192,17 @@ public void sendRulesetUpdates(SecurityGroupWork work) { ingressRules, egressRules, nicSecIps); cmd.setMsId(_serverId); if (logger.isDebugEnabled()) { - logger.debug("SecurityGroupManager v2: sending ruleset update for vm " + vm.getInstanceName() + ":ingress num rules=" + - cmd.getIngressRuleSet().size() + ":egress num rules=" + cmd.getEgressRuleSet().size() + " num cidrs=" + cmd.getTotalNumCidrs() + " sig=" + - cmd.getSignature()); + logger.debug("SecurityGroupManager v2: sending ruleset update for vm {} ingress num rules={} egress num rules={} num cidrs={} sig={}", + vm, cmd.getIngressRuleSet().size(), cmd.getEgressRuleSet().size(), cmd.getTotalNumCidrs(), cmd.getSignature()); } Commands cmds = new Commands(cmd); try { _agentMgr.send(agentId, cmds, _answerListener); if (logger.isTraceEnabled()) { - logger.trace("SecurityGroupManager v2: sent ruleset updates for " + vm.getInstanceName() + " curr queue size=" + _workQueue.size()); + logger.trace("SecurityGroupManager v2: sent ruleset updates for {} curr queue size={}", vm, _workQueue.size()); } } catch (AgentUnavailableException e) { - logger.debug("Unable to send updates for vm: " + userVmId + "(agentid=" + agentId + ")"); + logger.debug("Unable to send updates for vm: {} (agent={})", vm, hostDao.findByIdIncludingRemoved(agentId)); _workTracker.handleException(agentId); } } diff --git a/server/src/main/java/com/cloud/network/vpc/NetworkACLManagerImpl.java b/server/src/main/java/com/cloud/network/vpc/NetworkACLManagerImpl.java index b20e1af4673f..60d144b84b2b 100644 --- a/server/src/main/java/com/cloud/network/vpc/NetworkACLManagerImpl.java +++ b/server/src/main/java/com/cloud/network/vpc/NetworkACLManagerImpl.java @@ -117,7 +117,7 @@ public boolean 
applyNetworkACL(final long aclId) throws ResourceUnavailableExcep if (!applyACLToPrivateGw(privateGateway)) { aclApplyStatus = false; - logger.debug("failed to apply network acl item on private gateway " + privateGateway.getId() + "acl id " + aclId); + logger.debug("failed to apply network acl item on private gateway {} acl {}", privateGateway::getUuid, () -> _networkACLDao.findById(aclId)); break; } } @@ -172,7 +172,7 @@ public boolean replaceNetworkACLForPrivateGw(final NetworkACL acl, final Private //Otherwise existing rules will not be removed on the router element logger.debug("New network ACL is empty. Revoke existing rules before applying ACL"); if (!revokeACLItemsForPrivateGw(gateway)) { - throw new CloudRuntimeException("Failed to replace network ACL. Error while removing existing ACL " + "items for privatewa gateway: " + gateway.getId()); + throw new CloudRuntimeException(String.format("Failed to replace network ACL. Error while removing existing ACL items for private gateway: [id: %d, uuid: %s]", gateway.getId(), gateway.getUuid())); } } @@ -206,7 +206,7 @@ public boolean replaceNetworkACL(final NetworkACL acl, final NetworkVO network) logger.debug("New network ACL is empty. Revoke existing rules before applying ACL"); } else { if (!revokeACLItemsForNetwork(network.getId())) { - throw new CloudRuntimeException("Failed to replace network ACL. Error while removing existing ACL items for network: " + network.getId()); + throw new CloudRuntimeException(String.format("Failed to replace network ACL. 
Error while removing existing ACL items for network: %s", network)); } } } @@ -214,7 +214,7 @@ public boolean replaceNetworkACL(final NetworkACL acl, final NetworkVO network) network.setNetworkACLId(acl.getId()); //Update Network ACL if (_networkDao.update(network.getId(), network)) { - logger.debug("Updated network: " + network.getId() + " with Network ACL Id: " + acl.getId() + ", Applying ACL items"); + logger.debug("Updated network: {} with Network ACL Id: {}, Applying ACL items", network, acl); //Apply ACL to network final Boolean result = applyACLToNetwork(network.getId()); if (result) { @@ -293,12 +293,12 @@ public boolean revokeACLItemsForNetwork(final long networkId) throws ResourceUna } final List aclItems = _networkACLItemDao.listByACL(network.getNetworkACLId()); if (aclItems.isEmpty()) { - logger.debug("Found no network ACL Items for network id=" + networkId); + logger.debug("Found no network ACL Items for network={}", network); return true; } if (logger.isDebugEnabled()) { - logger.debug("Releasing " + aclItems.size() + " Network ACL Items for network id=" + networkId); + logger.debug("Releasing {} Network ACL Items for network={}", aclItems.size(), network); } for (final NetworkACLItemVO aclItem : aclItems) { @@ -311,7 +311,7 @@ public boolean revokeACLItemsForNetwork(final long networkId) throws ResourceUna final boolean success = applyACLItemsToNetwork(network.getId(), aclItems); if (logger.isDebugEnabled() && success) { - logger.debug("Successfully released Network ACLs for network id=" + networkId + " and # of rules now = " + aclItems.size()); + logger.debug("Successfully released Network ACLs for network={} and # of rules now = {}", network, aclItems.size()); } return success; @@ -322,12 +322,12 @@ public boolean revokeACLItemsForPrivateGw(final PrivateGateway gateway) throws R final long networkACLId = gateway.getNetworkACLId(); final List aclItems = _networkACLItemDao.listByACL(networkACLId); if (aclItems.isEmpty()) { - logger.debug("Found no 
network ACL Items for private gateway 'id=" + gateway.getId() + "'"); + logger.debug("Found no network ACL Items for private gateway {}", gateway); return true; } if (logger.isDebugEnabled()) { - logger.debug("Releasing " + aclItems.size() + " Network ACL Items for private gateway id=" + gateway.getId()); + logger.debug("Releasing {} Network ACL Items for private gateway {}", aclItems.size(), gateway); } for (final NetworkACLItemVO aclItem : aclItems) { @@ -340,7 +340,7 @@ public boolean revokeACLItemsForPrivateGw(final PrivateGateway gateway) throws R final boolean success = applyACLToPrivateGw(gateway, aclItems); if (logger.isDebugEnabled() && success) { - logger.debug("Successfully released Network ACLs for private gateway id=" + gateway.getId() + " and # of rules now = " + aclItems.size()); + logger.debug("Successfully released Network ACLs for private gateway={} and # of rules now = {}", gateway, aclItems.size()); } return success; @@ -437,7 +437,7 @@ public boolean applyACLItemsToNetwork(final long networkId, final List allAclRules) { if (CollectionUtils.isEmpty(allAclRules)) { - logger.debug(String.format("No ACL rules for [id=%s, name=%s]. Therefore, there is no need for consistency validation.", lockedAcl.getUuid(), lockedAcl.getName())); + logger.debug("No ACL rules for {}. 
Therefore, there is no need for consistency validation.", lockedAcl); return; } String aclConsistencyHash = moveNetworkAclItemCmd.getAclConsistencyHash(); diff --git a/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java b/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java index 6d24c0fe700d..399abbfad176 100644 --- a/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java +++ b/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java @@ -985,8 +985,7 @@ public boolean deleteVpcOffering(final long offId) { // (the offering can be disabled though) final int vpcCount = vpcDao.getVpcCountByOfferingId(offId); if (vpcCount > 0) { - throw new InvalidParameterValueException("Can't delete vpc offering " + offId + " as its used by " + vpcCount + " vpcs. " - + "To make the network offering unavailable, disable it"); + throw new InvalidParameterValueException(String.format("Can't delete vpc offering %s as its used by %d vpcs. To make the network offering unavailable, disable it", offering, vpcCount)); } if (_vpcOffDao.remove(offId)) { @@ -1118,8 +1117,9 @@ private VpcOffering updateVpcOfferingInternal(long vpcOffId, String vpcOfferingN vpcOfferingDetailsDao.persist(detailVO); } } - logger.debug("Updated VPC offeirng id=" + vpcOffId); - return _vpcOffDao.findById(vpcOffId); + VpcOfferingVO updatedVpcOffering = _vpcOffDao.findById(vpcOffId); + logger.debug("Updated VPC offering {}", updatedVpcOffering); + return updatedVpcOffering; } @Override @@ -1360,7 +1360,7 @@ private void allocateSourceNatIp(Vpc vpc, String sourceNatIP) { logger.debug(String.format("Reserving a source NAT IP for NSX VPC %s", vpc.getName())); sourceNatIP = reserveSourceNatIpForNsxVpc(account, zone); } - IpAddress ip = _ipAddrMgr.allocateIp(account, false, CallContext.current().getCallingAccount(), CallContext.current().getCallingUserId(), zone, null, sourceNatIP); + IpAddress ip = _ipAddrMgr.allocateIp(account, false, CallContext.current().getCallingAccount(), 
CallContext.current().getCallingUser(), zone, null, sourceNatIP); this.associateIPToVpc(ip.getId(), vpc.getId()); } catch (ResourceAllocationException | ResourceUnavailableException | InsufficientAddressCapacityException e){ throw new CloudRuntimeException("new source NAT address cannot be acquired", e); @@ -1495,7 +1495,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { } // cleanup vpc resources - if (!cleanupVpcResources(vpc.getId(), caller, callerUserId)) { + if (!cleanupVpcResources(vpc, caller, callerUserId)) { logger.warn("Failed to cleanup resources for vpc " + vpc); return false; } @@ -1557,7 +1557,7 @@ public Vpc updateVpc(final long vpcId, final String vpcName, final String displa boolean restartRequired = checkAndUpdateRouterSourceNatIp(vpcToUpdate, sourceNatIp); if (vpcDao.update(vpcId, vpc) || restartRequired) { // Note that the update may fail because nothing has changed, other than the sourcenat ip - logger.debug("Updated VPC id=" + vpcId); + logger.debug("Updated VPC {}", vpc); if (restartRequired) { if (logger.isDebugEnabled()) { logger.debug(String.format("restarting vpc %s/%s, due to changing sourcenat in Update VPC call", vpc.getName(), vpc.getUuid())); @@ -2188,19 +2188,19 @@ public List getVpcsForAccount(final long accountId) { return vpcs; } - public boolean cleanupVpcResources(final long vpcId, final Account caller, final long callerUserId) throws ResourceUnavailableException, ConcurrentOperationException { - logger.debug("Cleaning up resources for vpc id=" + vpcId); + public boolean cleanupVpcResources(final Vpc vpc, final Account caller, final long callerUserId) throws ResourceUnavailableException, ConcurrentOperationException { + logger.debug("Cleaning up resources for vpc {}", vpc); boolean success = true; // 1) Remove VPN connections and VPN gateway logger.debug("Cleaning up existed site to site VPN connections"); - _s2sVpnMgr.cleanupVpnConnectionByVpc(vpcId); + 
_s2sVpnMgr.cleanupVpnConnectionByVpc(vpc.getId()); logger.debug("Cleaning up existed site to site VPN gateways"); - _s2sVpnMgr.cleanupVpnGatewayByVpc(vpcId); + _s2sVpnMgr.cleanupVpnGatewayByVpc(vpc.getId()); // 2) release all ip addresses - final List ipsToRelease = _ipAddressDao.listByAssociatedVpc(vpcId, null); - logger.debug("Releasing ips for vpc id=" + vpcId + " as a part of vpc cleanup"); + final List ipsToRelease = _ipAddressDao.listByAssociatedVpc(vpc.getId(), null); + logger.debug("Releasing ips for vpc {} as a part of vpc cleanup", vpc); for (final IPAddressVO ipToRelease : ipsToRelease) { if (ipToRelease.isPortable()) { // portable IP address are associated with owner, until @@ -2211,38 +2211,38 @@ public boolean cleanupVpcResources(final long vpcId, final Account caller, final _ipAddressDao.update(ipToRelease.getId(), ipToRelease); logger.debug("Portable IP address " + ipToRelease + " is no longer associated with any VPC"); } else { - success = success && _ipAddrMgr.disassociatePublicIpAddress(ipToRelease.getId(), callerUserId, caller); + success = success && _ipAddrMgr.disassociatePublicIpAddress(ipToRelease, callerUserId, caller); if (!success) { - logger.warn("Failed to cleanup ip " + ipToRelease + " as a part of vpc id=" + vpcId + " cleanup"); + logger.warn("Failed to cleanup ip {} as a part of vpc {} cleanup", ipToRelease, vpc); } } } if (success) { - logger.debug("Released ip addresses for vpc id=" + vpcId + " as a part of cleanup vpc process"); + logger.debug("Released ip addresses for vpc {} as a part of cleanup vpc process", vpc); } else { - logger.warn("Failed to release ip addresses for vpc id=" + vpcId + " as a part of cleanup vpc process"); + logger.warn("Failed to release ip addresses for vpc {} as a part of cleanup vpc process", vpc); // although it failed, proceed to the next cleanup step as it // doesn't depend on the public ip release } // 3) Delete all static route rules - if (!revokeStaticRoutesForVpc(vpcId, caller)) { - 
logger.warn("Failed to revoke static routes for vpc " + vpcId + " as a part of cleanup vpc process"); + if (!revokeStaticRoutesForVpc(vpc, caller)) { + logger.warn("Failed to revoke static routes for vpc {} as a part of cleanup vpc process", vpc); return false; } // 4) Delete private gateways - final List gateways = getVpcPrivateGateways(vpcId); + final List gateways = getVpcPrivateGateways(vpc.getId()); if (gateways != null) { for (final PrivateGateway gateway : gateways) { if (gateway != null) { - logger.debug("Deleting private gateway " + gateway + " as a part of vpc " + vpcId + " resources cleanup"); + logger.debug("Deleting private gateway {} as a part of vpc {} resources cleanup", gateway, vpc); if (!deleteVpcPrivateGateway(gateway.getId())) { success = false; - logger.debug("Failed to delete private gateway " + gateway + " as a part of vpc " + vpcId + " resources cleanup"); + logger.debug("Failed to delete private gateway {} as a part of vpc {} resources cleanup", gateway, vpc); } else { - logger.debug("Deleted private gateway " + gateway + " as a part of vpc " + vpcId + " resources cleanup"); + logger.debug("Deleted private gateway {} as a part of vpc {} resources cleanup", gateway, vpc); } } } @@ -2253,7 +2253,7 @@ public boolean cleanupVpcResources(final long vpcId, final Account caller, final searchBuilder.and("vpcId", searchBuilder.entity().getVpcId(), Op.IN); final SearchCriteria searchCriteria = searchBuilder.create(); - searchCriteria.setParameters("vpcId", vpcId); + searchCriteria.setParameters("vpcId", vpc.getId()); final Filter filter = new Filter(NetworkACLVO.class, "id", false, null, null); final Pair, Integer> aclsCountPair = _networkAclDao.searchAndCount(searchCriteria, filter); @@ -2263,15 +2263,14 @@ public boolean cleanupVpcResources(final long vpcId, final Account caller, final _networkAclMgr.deleteNetworkACL(networkAcl); } - routedIpv4Manager.releaseBgpPeersForVpc(vpcId); - routedIpv4Manager.releaseIpv4SubnetForVpc(vpcId); + 
routedIpv4Manager.releaseBgpPeersForVpc(vpc.getId()); + routedIpv4Manager.releaseIpv4SubnetForVpc(vpc.getId()); - VpcVO vpc = vpcDao.findById(vpcId); annotationDao.removeByEntityType(AnnotationService.EntityType.VPC.name(), vpc.getUuid()); ASNumberVO asNumber = asNumberDao.findByZoneAndVpcId(vpc.getZoneId(), vpc.getId()); if (asNumber != null) { - logger.debug(String.format("Releasing AS number %s from VPC %s", asNumber.getAsNumber(), vpc.getName())); + logger.debug("Releasing AS number {} from VPC {}", asNumber.getAsNumber(), vpc); bgpService.releaseASNumber(vpc.getZoneId(), asNumber.getAsNumber(), true); } @@ -2460,7 +2459,7 @@ private PrivateGateway createVpcPrivateGateway(final long vpcId, Long physicalNe // try to create it } if (privateNtwk == null) { - logger.info("creating new network for vpc " + vpc + " using broadcast uri: " + broadcastUri + " and associated network id: " + associatedNetworkId); + logger.info("creating new network for vpc {} using broadcast uri: {} and associated network: {}", vpc, broadcastUri, _ntwkDao.findById(associatedNetworkId)); final String networkName = "vpc-" + vpc.getName() + "-privateNetwork"; privateNtwk = _ntwkSvc.createPrivateNetwork(networkName, networkName, physicalNetworkIdFinal, broadcastUri, ipAddress, null, gateway, netmask, gatewayOwnerId, vpcId, isSourceNat, networkOfferingId, bypassVlanOverlapCheck, associatedNetworkId); @@ -2480,7 +2479,7 @@ private PrivateGateway createVpcPrivateGateway(final long vpcId, Long physicalNe final Long nextMac = mac + 1; dc.setMacAddress(nextMac); - logger.info("creating private ip address for vpc (" + ipAddress + ", " + privateNtwk.getId() + ", " + nextMac + ", " + vpcId + ", " + isSourceNat + ")"); + logger.info("creating private ip address for vpc ({}, {}, {}, {}, {})", ipAddress, privateNtwk, nextMac, vpcId, isSourceNat); privateIp = new PrivateIpVO(ipAddress, privateNtwk.getId(), nextMac, vpcId, isSourceNat); _privateIpDao.persist(privateIp); @@ -2671,7 +2670,7 @@ public boolean 
deleteVpcPrivateGateway(final long gatewayId) throws ConcurrentOp final VpcGatewayVO gatewayVO = _vpcGatewayDao.acquireInLockTable(gatewayId); if (gatewayVO == null || gatewayVO.getType() != VpcGateway.Type.Private) { - throw new ConcurrentOperationException("Unable to lock gateway " + gatewayId); + throw new ConcurrentOperationException(String.format("Unable to lock gateway %s", gatewayToBeDeleted)); } final Account caller = CallContext.current().getCallingAccount(); @@ -2745,6 +2744,7 @@ private void cleanUpRoutesByGatewayId(long gatewayId){ protected boolean deletePrivateGatewayFromTheDB(final PrivateGateway gateway) { // check if there are ips allocted in the network final long networkId = gateway.getNetworkId(); + NetworkVO network = _ntwkDao.findById(networkId); vpcTxCallable.setGateway(gateway); @@ -2759,12 +2759,10 @@ protected boolean deletePrivateGatewayFromTheDB(final PrivateGateway gateway) { final Account owner = _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM); final ReservationContext context = new ReservationContextImpl(null, null, callerUser, owner); _ntwkMgr.destroyNetwork(networkId, context, false); - logger.debug("Deleted private network id=" + networkId); + logger.debug("Deleted private network {}", network); } - } catch (final InterruptedException e) { - logger.error("deletePrivateGatewayFromTheDB failed to delete network id " + networkId + "due to => ", e); - } catch (final ExecutionException e) { - logger.error("deletePrivateGatewayFromTheDB failed to delete network id " + networkId + "due to => ", e); + } catch (final InterruptedException | ExecutionException e) { + logger.error("deletePrivateGatewayFromTheDB failed to delete network {} due to => ", network, e); } return true; @@ -2922,10 +2920,10 @@ public boolean revokeStaticRoute(final long routeId) throws ResourceUnavailableE } @DB - protected boolean revokeStaticRoutesForVpc(final long vpcId, final Account caller) throws ResourceUnavailableException { + protected boolean 
revokeStaticRoutesForVpc(final Vpc vpc, final Account caller) throws ResourceUnavailableException { // get all static routes for the vpc - final List routes = _staticRouteDao.listByVpcId(vpcId); - logger.debug("Found " + routes.size() + " to revoke for the vpc " + vpcId); + final List routes = _staticRouteDao.listByVpcId(vpc.getId()); + logger.debug("Found {} to revoke for the vpc {}", routes.size(), vpc); if (!routes.isEmpty()) { // mark all of them as revoke Transaction.execute(new TransactionCallbackNoReturn() { @@ -2936,7 +2934,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { } } }); - return applyStaticRoutesForVpc(vpcId); + return applyStaticRoutesForVpc(vpc.getId()); } return true; @@ -3223,7 +3221,13 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { @Override public void unassignIPFromVpcNetwork(final long ipId, final long networkId) { - final IPAddressVO ip = _ipAddressDao.findById(ipId); + IPAddressVO ip = _ipAddressDao.findById(ipId); + Network network = _ntwkModel.getNetwork(networkId); + unassignIPFromVpcNetwork(ip, network); + } + + @Override + public void unassignIPFromVpcNetwork(final IPAddressVO ip, final Network network) { if (isIpAllocatedToVpc(ip)) { return; } @@ -3232,23 +3236,23 @@ public void unassignIPFromVpcNetwork(final long ipId, final long networkId) { return; } - logger.debug("Releasing VPC ip address " + ip + " from vpc network id=" + networkId); + logger.debug("Releasing VPC ip address {} from vpc network {}", ip, network); final long vpcId = ip.getVpcId(); boolean success = false; try { // unassign ip from the VPC router - success = _ipAddrMgr.applyIpAssociations(_ntwkModel.getNetwork(networkId), true); + success = _ipAddrMgr.applyIpAssociations(network, true); } catch (final ResourceUnavailableException ex) { - throw new CloudRuntimeException("Failed to apply ip associations for network id=" + networkId + " as a part of unassigning ip " + ipId + " from vpc", ex); + throw new 
CloudRuntimeException("Failed to apply ip associations for network " + network + " as a part of unassigning ip " + ip + " from vpc", ex); } if (success) { ip.setAssociatedWithNetworkId(null); - _ipAddressDao.update(ipId, ip); - logger.debug("IP address " + ip + " is no longer associated with the network inside vpc id=" + vpcId); + _ipAddressDao.update(ip.getId(), ip); + logger.debug("IP address {} is no longer associated with the network inside vpc {}", ip, vpcDao.findById(vpcId)); } else { - throw new CloudRuntimeException("Failed to apply ip associations for network id=" + networkId + " as a part of unassigning ip " + ipId + " from vpc"); + throw new CloudRuntimeException(String.format("Failed to apply ip associations for network %s as a part of unassigning ip %s from vpc", network, ip)); } logger.debug("Successfully released VPC ip address " + ip + " back to VPC pool "); } @@ -3453,7 +3457,7 @@ private boolean rollingRestartVpc(final Vpc vpc, final ReservationContext contex // Re-program VPC VR or add a new backup router for redundant VPC if (!startVpc(vpc, dest, context)) { - logger.debug("Failed to re-program VPC router or deploy a new backup router for VPC" + vpc); + logger.debug("Failed to re-program VPC router or deploy a new backup router for VPC {}", vpc); return false; } diff --git a/server/src/main/java/com/cloud/network/vpc/VpcPrivateGatewayTransactionCallable.java b/server/src/main/java/com/cloud/network/vpc/VpcPrivateGatewayTransactionCallable.java index 072b17ab9b99..45a629739475 100644 --- a/server/src/main/java/com/cloud/network/vpc/VpcPrivateGatewayTransactionCallable.java +++ b/server/src/main/java/com/cloud/network/vpc/VpcPrivateGatewayTransactionCallable.java @@ -21,6 +21,7 @@ import javax.inject.Inject; +import com.cloud.network.dao.NetworkDao; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; @@ -40,6 +41,8 @@ public class
VpcPrivateGatewayTransactionCallable implements Callable { private VpcGatewayDao _vpcGatewayDao; @Inject private PrivateIpDao _privateIpDao; + @Inject + private NetworkDao networkDao; private PrivateGateway gateway; private boolean deleteNetwork = true; @@ -54,7 +57,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { final List privateIps = _privateIpDao.listByNetworkId(networkId); if (privateIps.size() > 1 || !privateIps.get(0).getIpAddress().equalsIgnoreCase(gateway.getIp4Address())) { - logger.debug("Not removing network id=" + gateway.getNetworkId() + " as it has private ip addresses for other gateways"); + logger.debug("Not removing network {} as it has private ip addresses for other gateways", networkDao.findById(gateway.getNetworkId())); deleteNetwork = false; } diff --git a/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java b/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java index 6cef834b0f77..63ef6bdce386 100644 --- a/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java +++ b/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java @@ -329,8 +329,7 @@ public boolean destroyRemoteAccessVpnForIp(long ipId, Account caller, final bool }catch (ResourceUnavailableException ex) { vpn.setState(prevState); _remoteAccessVpnDao.update(vpn.getId(), vpn); - logger.debug("Failed to stop the vpn " + vpn.getId() + " , so reverted state to "+ - RemoteAccessVpn.State.Running); + logger.debug("Failed to stop the vpn {}, so reverted state to {}", vpn, RemoteAccessVpn.State.Running); success = false; } finally { if (success|| forceCleanup) { @@ -435,10 +434,10 @@ public VpnUser doInTransaction(TransactionStatus status) { @DB @Override - public boolean removeVpnUser(long vpnOwnerId, String username, Account caller) { - final VpnUserVO user = _vpnUsersDao.findByAccountAndUsername(vpnOwnerId, username); + public boolean removeVpnUser(Account vpnOwner, String 
username, Account caller) { + final VpnUserVO user = _vpnUsersDao.findByAccountAndUsername(vpnOwner.getId(), username); if (user == null) { - String errorMessage = String.format("Could not find VPN user=[%s]. VPN owner id=[%s]", username, vpnOwnerId); + String errorMessage = String.format("Could not find VPN user=[%s]. VPN owner=[%s]", username, vpnOwner); logger.debug(errorMessage); throw new InvalidParameterValueException(errorMessage); } @@ -521,14 +520,14 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } @DB - private boolean removeVpnUserWithoutRemoteAccessVpn(long vpnOwnerId, String userName) { - VpnUserVO vpnUser = _vpnUsersDao.findByAccountAndUsername(vpnOwnerId, userName); + private boolean removeVpnUserWithoutRemoteAccessVpn(Account vpnOwner, String userName) { + VpnUserVO vpnUser = _vpnUsersDao.findByAccountAndUsername(vpnOwner.getId(), userName); if (vpnUser == null) { - logger.error(String.format("VPN user not found with ownerId: %d and username: %s", vpnOwnerId, userName)); + logger.error("VPN user not found with owner: {} and username: {}", vpnOwner, userName); return false; } if (!State.Revoke.equals(vpnUser.getState())) { - logger.error(String.format("VPN user with ownerId: %d and username: %s is not in revoked state, current state: %s", vpnOwnerId, userName, vpnUser.getState())); + logger.error("VPN user with ownerId: {} and username: {} is not in revoked state, current state: {}", vpnOwner, userName, vpnUser.getState()); return false; } return _vpnUsersDao.remove(vpnUser.getId()); @@ -546,7 +545,7 @@ public boolean applyVpnUsers(long vpnOwnerId, String userName, boolean forRemove if (CollectionUtils.isEmpty(vpns)) { if (forRemove) { - return removeVpnUserWithoutRemoteAccessVpn(vpnOwnerId, userName); + return removeVpnUserWithoutRemoteAccessVpn(owner, userName); } logger.warn(String.format("Unable to apply VPN user due to there are no remote access VPNs configured on %s to apply VPN user.", owner.toString())); return true; 
@@ -578,7 +577,7 @@ public boolean applyVpnUsers(long vpnOwnerId, String userName, boolean forRemove if (indexUser == users.size()) { indexUser = 0; } - logger.debug("VPN User " + users.get(indexUser) + (result == null ? " is set on " : (" couldn't be set due to " + result) + " on ") + vpn.getUuid()); + logger.debug("VPN User {}{}{}", users.get(indexUser), result == null ? " is set on " : (" couldn't be set due to " + result) + " on ", vpn); if (result == null) { if (finals[indexUser] == null) { finals[indexUser] = true; diff --git a/server/src/main/java/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java b/server/src/main/java/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java index 094f81607fe3..ed83e396abfb 100644 --- a/server/src/main/java/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java +++ b/server/src/main/java/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java @@ -134,12 +134,12 @@ public Site2SiteVpnGateway createVpnGateway(CreateVpnGatewayCmd cmd) { } Site2SiteVpnGatewayVO gws = _vpnGatewayDao.findByVpcId(vpcId); if (gws != null) { - throw new InvalidParameterValueException("The VPN gateway of VPC " + vpcId + " already existed!"); + throw new InvalidParameterValueException(String.format("The VPN gateway of VPC %s already existed!", vpc)); } //Use source NAT ip for VPC List ips = _ipAddressDao.listByAssociatedVpc(vpcId, true); if (ips.size() != 1) { - throw new CloudRuntimeException("Cannot found source nat ip of vpc " + vpcId); + throw new CloudRuntimeException(String.format("Cannot found source nat ip of vpc %s", vpc)); } Site2SiteVpnGatewayVO gw = new Site2SiteVpnGatewayVO(owner.getAccountId(), owner.getDomainId(), ips.get(0).getId(), vpcId); @@ -266,7 +266,7 @@ public Site2SiteVpnConnection createVpnConnection(CreateVpnConnectionCmd cmd) { Site2SiteVpnGateway vpnGateway = getAndValidateSite2SiteVpnGateway(vpnGatewayId, caller); validateVpnConnectionOfTheRightAccount(customerGateway, vpnGateway); - validateVpnConnectionDoesntExist(vpnGatewayId, 
customerGatewayId); + validateVpnConnectionDoesntExist(customerGateway, vpnGateway); validatePrerequisiteVpnGateway(vpnGateway); String[] cidrList = customerGateway.getGuestCidrList().split(","); @@ -275,8 +275,7 @@ public Site2SiteVpnConnection createVpnConnection(CreateVpnConnectionCmd cmd) { String vpcCidr = _vpcDao.findById(vpnGateway.getVpcId()).getCidr(); for (String cidr : cidrList) { if (NetUtils.isNetworksOverlap(vpcCidr, cidr)) { - throw new InvalidParameterValueException("The subnets of customer gateway " + customerGatewayId + "'s subnet " + cidr + " is overlapped with VPC cidr " + - vpcCidr + "!"); + throw new InvalidParameterValueException(String.format("The subnets of customer gateway %s subnet %s is overlapped with VPC cidr %s!", customerGateway, cidr, vpcCidr)); } } @@ -335,10 +334,9 @@ private void validateVpnConnectionOfTheRightAccount(Site2SiteCustomerGateway cus } } - private void validateVpnConnectionDoesntExist(Long vpnGatewayId, Long customerGatewayId) { - if (_vpnConnectionDao.findByVpnGatewayIdAndCustomerGatewayId(vpnGatewayId, customerGatewayId) != null) { - throw new InvalidParameterValueException("The vpn connection with customer gateway id " + customerGatewayId + " and vpn gateway id " + vpnGatewayId + - " already existed!"); + private void validateVpnConnectionDoesntExist(Site2SiteCustomerGateway customerGateway, Site2SiteVpnGateway vpnGateway) { + if (_vpnConnectionDao.findByVpnGatewayIdAndCustomerGatewayId(vpnGateway.getId(), customerGateway.getId()) != null) { + throw new InvalidParameterValueException(String.format("The vpn connection with customer gateway %s and vpn gateway %s already existed!", customerGateway, vpnGateway)); } } @@ -414,7 +412,7 @@ protected boolean doDeleteCustomerGateway(Site2SiteCustomerGateway gw) { long id = gw.getId(); List vpnConnections = _vpnConnectionDao.listByCustomerGatewayId(id); if (!CollectionUtils.isEmpty(vpnConnections)) { - throw new InvalidParameterValueException("Unable to delete VPN customer 
gateway with id " + id + " because there is still related VPN connections!"); + throw new InvalidParameterValueException(String.format("Unable to delete VPN customer gateway %s because there is still related VPN connections!", gw)); } annotationDao.removeByEntityType(AnnotationService.EntityType.VPN_CUSTOMER_GATEWAY.name(), gw.getUuid()); _customerGatewayDao.remove(id); @@ -424,7 +422,7 @@ protected boolean doDeleteCustomerGateway(Site2SiteCustomerGateway gw) { protected void doDeleteVpnGateway(Site2SiteVpnGateway gw) { List conns = _vpnConnectionDao.listByVpnGatewayId(gw.getId()); if (!CollectionUtils.isEmpty(conns)) { - throw new InvalidParameterValueException("Unable to delete VPN gateway " + gw.getId() + " because there is still related VPN connections!"); + throw new InvalidParameterValueException(String.format("Unable to delete VPN gateway %s because there is still related VPN connections!", gw)); } _vpnGatewayDao.remove(gw.getId()); } @@ -546,7 +544,7 @@ private void setupVpnConnection(Account caller, Long vpnCustomerGwIp) { } catch (PermissionDeniedException e) { // Just don't restart this connection, as the user has no rights to it // Maybe should issue a notification to the system? 
- logger.info("Site2SiteVpnManager:updateCustomerGateway() Not resetting VPN connection " + conn.getId() + " as user lacks permission"); + logger.info("Site2SiteVpnManager:updateCustomerGateway() Not resetting VPN connection {} as user lacks permission", conn); continue; } @@ -814,7 +812,7 @@ public void markDisconnectVpnConnByVpc(long vpcId) { } Site2SiteVpnConnectionVO lock = _vpnConnectionDao.acquireInLockTable(conn.getId()); if (lock == null) { - throw new CloudRuntimeException("Unable to acquire lock on " + conn); + throw new CloudRuntimeException(String.format("Unable to acquire lock on vpn connection %s", conn)); } try { if (conn.getState() == Site2SiteVpnConnection.State.Connected || conn.getState() == Site2SiteVpnConnection.State.Connecting) { @@ -861,7 +859,7 @@ public void reconnectDisconnectedVpnByVpc(Long vpcId) { startVpnConnection(conn.getId()); } catch (ResourceUnavailableException e) { Site2SiteCustomerGatewayVO gw = _customerGatewayDao.findById(conn.getCustomerGatewayId()); - logger.warn("Site2SiteVpnManager: Fail to re-initiate VPN connection " + conn.getId() + " which connect to " + gw.getName()); + logger.warn("Site2SiteVpnManager: Fail to re-initiate VPN connection {} which connect to {}", conn, gw); } } } diff --git a/server/src/main/java/com/cloud/projects/ProjectManager.java b/server/src/main/java/com/cloud/projects/ProjectManager.java index 123284955fa9..5f58205208be 100644 --- a/server/src/main/java/com/cloud/projects/ProjectManager.java +++ b/server/src/main/java/com/cloud/projects/ProjectManager.java @@ -35,7 +35,7 @@ public interface ProjectManager extends ProjectService { boolean canModifyProjectAccount(Account caller, long accountId); - boolean deleteAccountFromProject(long projectId, long accountId); + boolean deleteAccountFromProject(long projectId, Account account); List listPermittedProjectAccounts(long accountId); diff --git a/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java 
b/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java index 16e3925330dd..300c65a98426 100644 --- a/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java +++ b/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java @@ -379,7 +379,7 @@ public boolean deleteProject(Account caller, long callerUserId, final ProjectVO boolean updateResult = Transaction.execute(new TransactionCallback() { @Override public Boolean doInTransaction(TransactionStatus status) { - logger.debug("Marking project id=" + project.getId() + " with state " + State.Disabled + " as a part of project delete..."); + logger.debug("Marking project {} with state {} as a part of project delete...", project, State.Disabled); project.setState(State.Disabled); boolean updateResult = _projectDao.update(project.getId(), project); //owner can be already removed at this point, so adding the conditional check @@ -395,7 +395,7 @@ public Boolean doInTransaction(TransactionStatus status) { if (updateResult) { //pass system caller when clenaup projects account if (!cleanupProject(project, _accountDao.findById(Account.ACCOUNT_ID_SYSTEM), User.UID_SYSTEM)) { - logger.warn("Failed to cleanup project's id=" + project.getId() + " resources, not removing the project yet"); + logger.warn("Failed to cleanup project's ({}) resources, not removing the project yet", project); return false; } else { //check if any Tungsten-Fabric provider exists and delete the project from Tungsten-Fabric providers @@ -403,7 +403,7 @@ public Boolean doInTransaction(TransactionStatus status) { return _projectDao.remove(project.getId()); } } else { - logger.warn("Failed to mark the project id=" + project.getId() + " with state " + State.Disabled); + logger.warn("Failed to mark the project {} with state {}", project, State.Disabled); return false; } } @@ -413,7 +413,7 @@ private boolean cleanupProject(final Project project, AccountVO caller, Long cal boolean result = true; //Delete project's account AccountVO account = 
_accountDao.findById(project.getProjectAccountId()); - logger.debug("Deleting projects " + project + " internal account id=" + account.getId() + " as a part of project cleanup..."); + logger.debug("Deleting projects {} internal account {} as a part of project cleanup...", project, account); result = result && _accountMgr.deleteAccount(account, callerUserId, caller); @@ -482,20 +482,20 @@ public ProjectAccount assignUserToProject(Project project, long userId, long acc @Override @DB - public boolean deleteAccountFromProject(final long projectId, final long accountId) { + public boolean deleteAccountFromProject(final long projectId, final Account account) { return Transaction.execute(new TransactionCallback() { @Override public Boolean doInTransaction(TransactionStatus status) { boolean success = true; //remove account - ProjectAccountVO projectAccount = _projectAccountDao.findByProjectIdAccountId(projectId, accountId); + ProjectAccountVO projectAccount = _projectAccountDao.findByProjectIdAccountId(projectId, account.getId()); success = _projectAccountDao.remove(projectAccount.getId()); //remove all invitations for account if (success) { - logger.debug("Removed account " + accountId + " from project " + projectId + " , cleaning up old invitations for account/project..."); - ProjectInvitation invite = _projectInvitationDao.findByAccountIdProjectId(accountId, projectId); + logger.debug("Removed account {} from project {} , cleaning up old invitations for account/project...", account, projectId); + ProjectInvitation invite = _projectInvitationDao.findByAccountIdProjectId(account.getId(), projectId); if (invite != null) { success = success && _projectInvitationDao.remove(invite.getId()); } @@ -572,7 +572,7 @@ public boolean addUserToProject(Long projectId, String username, String email, L ProjectAccount projectAccountUser = _projectAccountDao.findByProjectIdUserId(projectId, user.getAccountId(), user.getId()); if (projectAccountUser != null) { - logger.info("User with id: 
" + user.getId() + " is already added to the project with id: " + projectId); + logger.info("User: {} is already added to the project: {}", user, project); return true; } @@ -598,7 +598,7 @@ public boolean addUserToProject(Long projectId, String username, String email, L Optional.ofNullable(role).map(ProjectRole::getId).orElse(null)) != null) { return true; } - logger.warn("Failed to add user to project with id: " + projectId); + logger.warn("Failed to add user to project: {}", project); return false; } } @@ -691,8 +691,8 @@ public void doInTransactionWithoutResult(TransactionStatus status) throws Resour } Account currentOwnerAccount = getProjectOwner(projectId); if (currentOwnerAccount == null) { - logger.error("Unable to find the current owner for the project id=" + projectId); - throw new InvalidParameterValueException("Unable to find the current owner for the project id=" + projectId); + logger.error("Unable to find the current owner for the project {}", project); + throw new InvalidParameterValueException(String.format("Unable to find the current owner for the project %s", project)); } if (currentOwnerAccount.getId() != futureOwnerAccount.getId()) { ProjectAccountVO futureOwner = _projectAccountDao.findByProjectIdAccountId(projectId, futureOwnerAccount.getAccountId()); @@ -716,7 +716,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) throws Resour _resourceLimitMgr.incrementResourceCount(futureOwnerAccount.getId(), ResourceType.project); } else { - logger.trace("Future owner " + newOwnerName + "is already the owner of the project id=" + projectId); + logger.trace("Future owner {} is already the owner of the project {}", newOwnerName, project); } } } @@ -774,8 +774,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) throws Resour } ProjectAccountVO newProjectUser = _projectAccountDao.findByProjectIdUserId(projectId, user.getAccountId(), userId); if (newProjectUser == null) { - throw new 
InvalidParameterValueException("User " + userId + - " doesn't belong to the project. Add it to the project first and then change the project's ownership"); + throw new InvalidParameterValueException(String.format("User %s doesn't belong to the project. Add it to the project first and then change the project's ownership", user)); } if (projectOwners.size() == 1 && newProjectUser.getUserId().equals(projectOwners.get(0).getUserId()) @@ -835,7 +834,7 @@ public boolean addAccountToProject(long projectId, String accountName, String em //Check if the account already added to the project ProjectAccount projectAccount = _projectAccountDao.findByProjectIdAccountId(projectId, account.getId()); if (projectAccount != null) { - logger.debug("Account " + accountName + " already added to the project id=" + projectId); + logger.debug("Account {} already added to the project {}", accountName, project); return true; } } @@ -862,7 +861,7 @@ public boolean addAccountToProject(long projectId, String accountName, String em Optional.ofNullable(projectRole).map(ProjectRole::getId).orElse(null)) != null) { return true; } else { - logger.warn("Failed to add account " + accountName + " to project id=" + projectId); + logger.warn("Failed to add account {} to project {}", accountName, project); return false; } } @@ -874,7 +873,7 @@ private boolean inviteAccountToProject(Project project, Account account, String Optional.ofNullable(projectRole).map(ProjectRole::getId).orElse(null)) != null) { return true; } else { - logger.warn("Failed to generate invitation for account " + account.getAccountName() + " to project id=" + project); + logger.warn("Failed to generate invitation for account {} to project {}", account, project); return false; } } @@ -886,7 +885,7 @@ private boolean inviteAccountToProject(Project project, Account account, String Optional.ofNullable(projectRole).map(ProjectRole::getId).orElse(null)) != null) { return true; } else { - logger.warn("Failed to generate invitation for email " 
+ email + " to project id=" + project); + logger.warn("Failed to generate invitation for email {} to project {}", email, project); return false; } } @@ -900,7 +899,7 @@ private boolean inviteUserToProject(Project project, User user, String email, Ro Optional.ofNullable(projectRole).map(ProjectRole::getId).orElse(null)) != null) { return true; } else { - logger.warn("Failed to generate invitation for account " + user.getUsername() + " to project id=" + project); + logger.warn("Failed to generate invitation for account {} to project {}", user, project); return false; } } else { @@ -910,7 +909,7 @@ private boolean inviteUserToProject(Project project, User user, String email, Ro Optional.ofNullable(projectRole).map(ProjectRole::getId).orElse(null)) != null) { return true; } else { - logger.warn("Failed to generate invitation for email " + email + " to project id=" + project); + logger.warn("Failed to generate invitation for email {} to project {}", email, project); return false; } } @@ -975,7 +974,7 @@ public boolean deleteAccountFromProject(long projectId, String accountName) { throw ex; } - return deleteAccountFromProject(projectId, account.getId()); + return deleteAccountFromProject(projectId, account); } @Override @@ -1028,9 +1027,9 @@ private void deletePendingInvite(Long projectId, User user) { if (invite != null) { boolean success = _projectInvitationDao.remove(invite.getId()); if (success){ - logger.info("Successfully deleted invite pending for the user : "+user.getUsername()); + logger.info("Successfully deleted invite pending for the user : {}", user); } else { - logger.info("Failed to delete project invite for user: "+ user.getUsername()); + logger.info("Failed to delete project invite for user: {}", user); } } } @@ -1045,7 +1044,7 @@ public Boolean doInTransaction(TransactionStatus status) { success = _projectAccountDao.remove(projectAccount.getId()); if (success) { - logger.debug("Removed user " + user.getId() + " from project. 
Removing any invite sent to the user"); + logger.debug("Removed user {} from project. Removing any invite sent to the user", user); ProjectInvitation invite = _projectInvitationDao.findByUserIdProjectId(user.getId(), user.getAccountId(), projectId); if (invite != null) { success = success && _projectInvitationDao.remove(invite.getId()); @@ -1118,7 +1117,7 @@ public Boolean doInTransaction(TransactionStatus status) { public ProjectInvitation generateTokenBasedInvitation(Project project, Long userId, String email, String token, Role role, Long projectRoleId) { //verify if the invitation was already generated if (activeInviteExists(project, null, null, email)) { - throw new InvalidParameterValueException("There is already a pending invitation for email " + email + " to the project id=" + project); + throw new InvalidParameterValueException(String.format("There is already a pending invitation for email %s to the project %s", email, project)); } ProjectInvitationVO projectInvitationVO = new ProjectInvitationVO(project.getId(), null, project.getDomainId(), email, token); @@ -1136,7 +1135,7 @@ public ProjectInvitation generateTokenBasedInvitation(Project project, Long user try { sendInvite(token, email, project.getId()); } catch (Exception ex) { - logger.warn("Failed to send project id=" + project + " invitation to the email " + email + "; removing the invitation record from the db", ex); + logger.warn("Failed to send project {} invitation to the email {}; removing the invitation record from the db", project, email, ex); _projectInvitationDao.remove(projectInvitation.getId()); return null; } @@ -1166,7 +1165,7 @@ protected void sendInvite(String token, String email, long projectId) throws Mes } private boolean expireInvitation(ProjectInvitationVO invite) { - logger.debug("Expiring invitation id=" + invite.getId()); + logger.debug("Expiring invitation {}", invite); invite.setState(ProjectInvitation.State.Expired); return _projectInvitationDao.update(invite.getId(), 
invite); } @@ -1227,7 +1226,7 @@ public boolean updateInvitation(final long projectId, String accountName, Long u if (invite != null) { if (!_projectInvitationDao.isActive(invite.getId(), _invitationTimeOut) && accept) { expireInvitation(invite); - throw new InvalidParameterValueException("Invitation is expired for account id=" + accountName + " to the project id=" + projectId); + throw new InvalidParameterValueException(String.format("Invitation is expired for account id=%s to the project %s", accountName, project)); } else { final ProjectInvitationVO inviteFinal = invite; final Long accountIdFinal = invite.getAccountId() != -1 ? invite.getAccountId() : accountId; @@ -1250,14 +1249,14 @@ public Boolean doInTransaction(TransactionStatus status) { if (inviteFinal.getForUserId() == -1) { ProjectAccount projectAccount = _projectAccountDao.findByProjectIdAccountId(projectId, accountIdFinal); if (projectAccount != null) { - logger.debug("Account " + accountNameFinal + " already added to the project id=" + projectId); + logger.debug("Account {} already added to the project {}", accountNameFinal, project); } else { assignAccountToProject(project, accountIdFinal, inviteFinal.getAccountRole(), null, inviteFinal.getProjectRoleId()); } } else { ProjectAccount projectAccount = _projectAccountDao.findByProjectIdUserId(projectId, finalUser.getAccountId(), finalUser.getId()); if (projectAccount != null) { - logger.debug("User " + finalUser.getId() + "has already been added to the project id=" + projectId); + logger.debug("User {} has already been added to the project {}", finalUser, project); } else { assignUserToProject(project, inviteFinal.getForUserId(), finalUser.getAccountId(), inviteFinal.getAccountRole(), inviteFinal.getProjectRoleId()); } @@ -1270,7 +1269,7 @@ public Boolean doInTransaction(TransactionStatus status) { }); } } else { - throw new InvalidParameterValueException("Unable to find invitation for account name=" + accountName + " to the project id=" + projectId); 
+ throw new InvalidParameterValueException(String.format("Unable to find invitation for account name=%s to the project %s", accountName, project)); } return result; @@ -1312,7 +1311,7 @@ public Project activateProject(final long projectId) { Project.State currentState = project.getState(); if (currentState == State.Active) { - logger.debug("The project id=" + projectId + " is already active, no need to activate it again"); + logger.debug("The project {} is already active, no need to activate it again", project); return project; } @@ -1350,7 +1349,7 @@ public Project suspendProject(long projectId) throws ConcurrentOperationExceptio _accountMgr.checkAccess(caller, AccessType.ModifyProject, true, _accountMgr.getAccount(project.getProjectAccountId())); if (suspendProject(project)) { - logger.debug("Successfully suspended project id=" + projectId); + logger.debug("Successfully suspended project {}", project); return _projectDao.findById(projectId); } else { CloudRuntimeException ex = new CloudRuntimeException("Failed to suspend project with specified id"); @@ -1406,10 +1405,10 @@ public boolean deleteProjectInvitation(long id) { _accountMgr.checkAccess(caller, AccessType.ModifyProject, true, _accountMgr.getAccount(project.getProjectAccountId())); if (_projectInvitationDao.remove(id)) { - logger.debug("Project Invitation id=" + id + " is removed"); + logger.debug("Project Invitation {} is removed", invitation); return true; } else { - logger.debug("Failed to remove project invitation id=" + id); + logger.debug("Failed to remove project invitation {}", invitation); return false; } } @@ -1425,7 +1424,7 @@ protected void runInContext() { for (ProjectInvitationVO invitationToExpire : invitationsToExpire) { invitationToExpire.setState(ProjectInvitation.State.Expired); _projectInvitationDao.update(invitationToExpire.getId(), invitationToExpire); - logger.trace("Expired project invitation id=" + invitationToExpire.getId()); + logger.trace("Expired project invitation {}", 
invitationToExpire); } } } catch (Exception ex) { diff --git a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java index 228373896204..e0340385a66f 100755 --- a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java @@ -451,9 +451,6 @@ public List discoverCluster(final AddClusterCmd cmd) throws I } final HostPodVO pod = _podDao.findById(podId); - if (pod == null) { - throw new InvalidParameterValueException("Can't find pod with specified podId " + podId); - } // Check if the pod exists in the system if (_podDao.findById(podId) == null) { @@ -461,7 +458,7 @@ public List discoverCluster(final AddClusterCmd cmd) throws I } // check if pod belongs to the zone if (!Long.valueOf(pod.getDataCenterId()).equals(dcId)) { - final InvalidParameterValueException ex = new InvalidParameterValueException("Pod with specified id doesn't belong to the zone " + dcId); + final InvalidParameterValueException ex = new InvalidParameterValueException(String.format("Pod with specified id doesn't belong to the zone %s", zone)); ex.addProxyObject(pod.getUuid(), "podId"); ex.addProxyObject(zone.getUuid(), "dcId"); throw ex; @@ -687,15 +684,16 @@ private List discoverHostsFull(final Long dcId, final Long podId, Long c } // Check if the pod exists in the system + HostPodVO pod = null; if (podId != null) { - final HostPodVO pod = _podDao.findById(podId); + pod = _podDao.findById(podId); if (pod == null) { throw new InvalidParameterValueException("Can't find pod by id " + podId); } // check if pod belongs to the zone if (!Long.valueOf(pod.getDataCenterId()).equals(dcId)) { final InvalidParameterValueException ex = - new InvalidParameterValueException("Pod with specified podId" + podId + " doesn't belong to the zone with specified zoneId" + dcId); + new InvalidParameterValueException(String.format("Pod with specified pod %s doesn't belong 
to the zone with specified zone %s", pod, zone)); ex.addProxyObject(pod.getUuid(), "podId"); ex.addProxyObject(zone.getUuid(), "dcId"); throw ex; @@ -727,8 +725,10 @@ private List discoverHostsFull(final Long dcId, final Long podId, Long c } } + ClusterVO cluster = null; if (clusterId != null) { - if (_clusterDao.findById(clusterId) == null) { + cluster = _clusterDao.findById(clusterId); + if (cluster == null) { throw new InvalidParameterValueException("Can't find cluster by id " + clusterId); } @@ -761,11 +761,10 @@ private List discoverHostsFull(final Long dcId, final Long podId, Long c } if (clusterName != null) { - final HostPodVO pod = _podDao.findById(podId); if (pod == null) { throw new InvalidParameterValueException("Can't find pod by id " + podId); } - ClusterVO cluster = new ClusterVO(dcId, podId, clusterName); + cluster = new ClusterVO(dcId, podId, clusterName); cluster.setHypervisorType(hypervisorType); try { cluster = _clusterDao.persist(cluster); @@ -810,7 +809,7 @@ private List discoverHostsFull(final Long dcId, final Long podId, Long c } final List hosts = new ArrayList(); - logger.info("Trying to add a new host at " + url + " in data center " + dcId); + logger.info("Trying to add a new host at {} in data center {}", url, zone); boolean isHypervisorTypeSupported = false; for (final Discoverer discoverer : _discoverers) { if (params != null) { @@ -828,7 +827,7 @@ private List discoverHostsFull(final Long dcId, final Long podId, Long c resources = discoverer.find(dcId, podId, clusterId, uri, username, password, hostTags); } catch (final DiscoveryException e) { String errorMsg = String.format("Could not add host at [%s] with zone [%s], pod [%s] and cluster [%s] due to: [%s].", - uri, dcId, podId, clusterId, e.getMessage()); + uri, zone, pod, cluster, e.getMessage()); if (logger.isDebugEnabled()) { logger.debug(errorMsg, e); } @@ -925,8 +924,8 @@ protected boolean doDeleteHost(final long hostId, final boolean isForced, final isForceDeleteStorage); if 
(answer == null) { - throw new CloudRuntimeException("No resource adapter respond to DELETE_HOST event for " + host.getName() + " id = " + hostId + ", hypervisorType is " + - host.getHypervisorType() + ", host type is " + host.getType()); + throw new CloudRuntimeException(String.format("No resource adapter respond to DELETE_HOST event for %s, hypervisorType is %s, host type is %s", + host, host.getHypervisorType(), host.getType())); } if (answer.getIsException()) { @@ -1001,7 +1000,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { storagePool.setClusterId(null); _storagePoolDao.update(poolId, storagePool); _storagePoolDao.remove(poolId); - logger.debug(String.format("Local storage [id: %s] is removed as a part of %s removal", poolId, hostRemoved.toString())); + logger.debug("Local storage [id: {}] is removed as a part of {} removal", storagePool, hostRemoved); } } @@ -1092,9 +1091,9 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { final List hosts = listAllHostsInCluster(cmd.getId()); if (hosts.size() > 0) { if (logger.isDebugEnabled()) { - logger.debug("Cluster: " + cmd.getId() + " still has hosts, can't remove"); + logger.debug("Cluster: {} still has hosts, can't remove", cluster); } - throw new CloudRuntimeException("Cluster: " + cmd.getId() + " cannot be removed. Cluster still has hosts"); + throw new CloudRuntimeException(String.format("Cluster: %s cannot be removed. 
Cluster still has hosts", cluster)); } // don't allow to remove the cluster if it has non-removed storage @@ -1102,9 +1101,9 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { final List storagePools = _storagePoolDao.listPoolsByCluster(cmd.getId()); if (storagePools.size() > 0) { if (logger.isDebugEnabled()) { - logger.debug("Cluster: " + cmd.getId() + " still has storage pools, can't remove"); + logger.debug("Cluster: {} still has storage pools, can't remove", cluster); } - throw new CloudRuntimeException("Cluster: " + cmd.getId() + " cannot be removed. Cluster still has storage pools"); + throw new CloudRuntimeException(String.format("Cluster: %s cannot be removed. Cluster still has storage pools", cluster)); } if (_clusterDao.remove(cmd.getId())) { @@ -1130,7 +1129,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { } catch (final CloudRuntimeException e) { throw e; } catch (final Throwable t) { - logger.error("Unable to delete cluster: " + cmd.getId(), t); + logger.error("Unable to delete cluster: {}", _clusterDao.findById(cmd.getId()), t); return false; } } @@ -1294,7 +1293,7 @@ public Host cancelMaintenance(final CancelMaintenanceCmd cmd) { } if (!ResourceState.isMaintenanceState(host.getResourceState())) { - throw new CloudRuntimeException("Cannot perform cancelMaintenance when resource state is " + host.getResourceState() + ", hostId = " + hostId); + throw new CloudRuntimeException(String.format("Cannot perform cancelMaintenance when resource state is %s, host: %s", host.getResourceState(), host)); } processResourceEvent(ResourceListener.EVENT_CANCEL_MAINTENANCE_BEFORE, hostId); @@ -1347,25 +1346,25 @@ private void handleVmForLastHostOrWithVGpu(final HostVO host, final VMInstanceVO // for the last host in this cluster, destroy SSVM/CPVM and stop all other VMs if (VirtualMachine.Type.SecondaryStorageVm.equals(vm.getType()) || VirtualMachine.Type.ConsoleProxy.equals(vm.getType())) { - 
logger.error(String.format("Maintenance: VM is of type %s. Destroying VM %s (ID: %s) immediately instead of migration.", vm.getType().toString(), vm.getInstanceName(), vm.getUuid())); + logger.error("Maintenance: VM is of type {}. Destroying VM {} immediately instead of migration.", vm.getType(), vm); _haMgr.scheduleDestroy(vm, host.getId()); return; } - logger.error(String.format("Maintenance: No hosts available for migrations. Scheduling shutdown for VM %s instead of migration.", vm.getUuid())); + logger.error("Maintenance: No hosts available for migrations. Scheduling shutdown for VM {} instead of migration.", vm); _haMgr.scheduleStop(vm, host.getId(), WorkType.ForceStop); } private boolean doMaintain(final long hostId) { final HostVO host = _hostDao.findById(hostId); - logger.info("Maintenance: attempting maintenance of host " + host.getUuid()); + logger.info("Maintenance: attempting maintenance of host {}", host); ResourceState hostState = host.getResourceState(); if (!ResourceState.canAttemptMaintenance(hostState)) { - throw new CloudRuntimeException("Cannot perform maintain when resource state is " + hostState + ", hostId = " + hostId); + throw new CloudRuntimeException(String.format("Cannot perform maintain when resource state is %s, host = %s", hostState, host)); } final MaintainAnswer answer = (MaintainAnswer)_agentMgr.easySend(hostId, new MaintainCommand()); if (answer == null || !answer.getResult()) { - logger.warn("Unable to send MaintainCommand to host: " + hostId); + logger.warn("Unable to send MaintainCommand to host: {}", host); return false; } @@ -1377,7 +1376,7 @@ private boolean doMaintain(final long hostId) { throw new CloudRuntimeException(err + e.getMessage()); } - ActionEventUtils.onStartedActionEvent(CallContext.current().getCallingUserId(), CallContext.current().getCallingAccountId(), EventTypes.EVENT_MAINTENANCE_PREPARE, "starting maintenance for host " + hostId, hostId, null, true, 0); + 
ActionEventUtils.onStartedActionEvent(CallContext.current().getCallingUserId(), CallContext.current().getCallingAccountId(), EventTypes.EVENT_MAINTENANCE_PREPARE, String.format("starting maintenance for host %s", host), hostId, null, true, 0); _agentMgr.pullAgentToMaintenance(hostId); /* TODO: move below to listener */ @@ -1390,7 +1389,7 @@ private boolean doMaintain(final long hostId) { List hosts = listAllUpAndEnabledHosts(Host.Type.Routing, host.getClusterId(), host.getPodId(), host.getDataCenterId()); if (CollectionUtils.isEmpty(hosts)) { - logger.warn("Unable to find a host for vm migration in cluster: " + host.getClusterId()); + logger.warn("Unable to find a host for vm migration in cluster: {}", _clusterDao.findById(host.getClusterId())); if (! isClusterWideMigrationPossible(host, vms, hosts)) { return false; } @@ -1417,7 +1416,7 @@ private boolean doMaintain(final long hostId) { throw new CloudRuntimeException("There are active VMs using the host's local storage pool. Please stop all VMs on this host that use local storage."); } } else { - logger.info("Maintenance: scheduling migration of VM " + vm.getUuid() + " from host " + host.getUuid()); + logger.info("Maintenance: scheduling migration of VM {} from host {}", vm, host); _haMgr.scheduleMigration(vm); } } @@ -1427,7 +1426,8 @@ private boolean doMaintain(final long hostId) { private boolean isClusterWideMigrationPossible(Host host, List vms, List hosts) { if (MIGRATE_VM_ACROSS_CLUSTERS.valueIn(host.getDataCenterId())) { - logger.info("Looking for hosts across different clusters in zone: " + host.getDataCenterId()); + DataCenterVO zone = _dcDao.findById(host.getDataCenterId()); + logger.info("Looking for hosts across different clusters in zone: {}", zone); Long podId = null; for (final VMInstanceVO vm : vms) { if (VirtualMachine.systemVMs.contains(vm.getType())) { @@ -1438,7 +1438,7 @@ private boolean isClusterWideMigrationPossible(Host host, List vms } 
hosts.addAll(listAllUpAndEnabledHosts(Host.Type.Routing, null, podId, host.getDataCenterId())); if (CollectionUtils.isEmpty(hosts)) { - logger.warn("Unable to find a host for vm migration in zone: " + host.getDataCenterId()); + logger.warn("Unable to find a host for vm migration in zone: {}", zone); return false; } logger.info("Found hosts in the zone for vm migration: " + hosts); @@ -1472,17 +1472,15 @@ private void migrateAwayVmWithVolumes(HostVO host, VMInstanceVO vm) { try { dest = deploymentManager.planDeployment(profile, plan, new DeploymentPlanner.ExcludeList(), null); } catch (InsufficientServerCapacityException e) { - throw new CloudRuntimeException(String.format("Maintenance failed, could not find deployment destination for VM [id=%s, name=%s].", vm.getId(), vm.getInstanceName()), - e); + throw new CloudRuntimeException(String.format("Maintenance failed, could not find deployment destination for VM: %s.", vm), e); } Host destHost = dest.getHost(); try { _vmMgr.migrateWithStorage(vm.getUuid(), host.getId(), destHost.getId(), null); } catch (ResourceUnavailableException e) { - throw new CloudRuntimeException( - String.format("Maintenance failed, could not migrate VM [id=%s, name=%s] with local storage from host [id=%s, name=%s] to host [id=%s, name=%s].", vm.getId(), - vm.getInstanceName(), host.getId(), host.getName(), destHost.getId(), destHost.getName()), e); + throw new CloudRuntimeException(String.format("Maintenance failed, could not migrate VM (%s) with local storage from host (%s) to host (%s).", + vm, host, destHost), e); } } @@ -1521,8 +1519,9 @@ public Host maintain(final PrepareForMaintenanceCmd cmd) { } if (_hostDao.countBy(host.getClusterId(), ResourceState.PrepareForMaintenance, ResourceState.ErrorInPrepareForMaintenance) > 0) { - throw new CloudRuntimeException("There are other servers attempting migrations for maintenance. 
" + - "Found hosts in PrepareForMaintenance OR ErrorInPrepareForMaintenance STATUS in cluster " + host.getClusterId()); + throw new CloudRuntimeException(String.format("There are other servers attempting migrations for maintenance. " + + "Found hosts in PrepareForMaintenance OR ErrorInPrepareForMaintenance STATUS in cluster %s", + _clusterDao.findById(host.getClusterId()))); } if (_storageMgr.isLocalStorageActiveOnHost(host.getId())) { @@ -1551,10 +1550,10 @@ public Host maintain(final PrepareForMaintenanceCmd cmd) { processResourceEvent(ResourceListener.EVENT_PREPARE_MAINTENANCE_AFTER, hostId); return _hostDao.findById(hostId); } else { - throw new CloudRuntimeException("Unable to prepare for maintenance host " + hostId); + throw new CloudRuntimeException(String.format("Unable to prepare for maintenance host %s", host)); } } catch (final AgentUnavailableException e) { - throw new CloudRuntimeException("Unable to prepare for maintenance host " + hostId); + throw new CloudRuntimeException(String.format("Unable to prepare for maintenance host %s", host)); } } @@ -1594,28 +1593,27 @@ public Host declareHostAsDegraded(final DeclareHostAsDegradedCmd cmd) throws NoT if (host == null || StringUtils.isBlank(host.getName())) { throw new InvalidParameterValueException(String.format("Host [id:%s] does not exist.", hostId)); } else if (host.getRemoved() != null){ - throw new InvalidParameterValueException(String.format("Host [id:%s, name:%s] does not exist or it has been removed.", hostId, host.getName())); + throw new InvalidParameterValueException(String.format("Host [id:%s, uuid: %s, name:%s] does not exist or it has been removed.", hostId, host.getUuid(), host.getName())); } if (host.getResourceState() == ResourceState.Degraded) { - throw new NoTransitionException(String.format("Host [id:%s] was already marked as Degraded.", host.getId())); + throw new NoTransitionException(String.format("Host (%s) was already marked as Degraded.", host)); } if (host.getStatus() != 
Status.Alert && host.getStatus() != Status.Disconnected) { - throw new InvalidParameterValueException( - String.format("Cannot perform declare host [id=%s, name=%s] as 'Degraded' when host is in %s status", host.getId(), host.getName(), host.getStatus())); + throw new InvalidParameterValueException(String.format("Cannot perform declare host (%s) as 'Degraded' when host is in %s status", host, host.getStatus())); } try { resourceStateTransitTo(host, ResourceState.Event.DeclareHostDegraded, _nodeId); host.setResourceState(ResourceState.Degraded); } catch (NoTransitionException e) { - logger.error(String.format("Cannot transmit host [id:%s, name:%s, state:%s, status:%s] to %s state", host.getId(), host.getName(), host.getState(), host.getStatus(), - ResourceState.Event.DeclareHostDegraded), e); + logger.error("Cannot transmit host [id:{}, uuid: {}, name:{}, state:{}, status:{}] to {} state", + host.getId(), host.getUuid(), host.getName(), host.getState(), host.getStatus(), ResourceState.Event.DeclareHostDegraded, e); throw e; } - scheduleVmsRestart(hostId); + scheduleVmsRestart(host); return host; } @@ -1623,13 +1621,13 @@ public Host declareHostAsDegraded(final DeclareHostAsDegradedCmd cmd) throws NoT /** * This method assumes that the host is Degraded; therefore it schedule VMs to be re-started by the HA manager. */ - private void scheduleVmsRestart(Long hostId) { - List allVmsOnHost = _vmDao.listByHostId(hostId); + private void scheduleVmsRestart(Host host) { + List allVmsOnHost = _vmDao.listByHostId(host.getId()); if (CollectionUtils.isEmpty(allVmsOnHost)) { - logger.debug(String.format("Host [id=%s] was marked as Degraded with no allocated VMs, no need to schedule VM restart", hostId)); + logger.debug("Host ({}) was marked as Degraded with no allocated VMs, no need to schedule VM restart", host); } - logger.debug(String.format("Host [id=%s] was marked as Degraded with a total of %s allocated VMs. 
Triggering HA to start VMs that have HA enabled.", hostId, allVmsOnHost.size())); + logger.debug("Host ({}) was marked as Degraded with a total of {} allocated VMs. Triggering HA to start VMs that have HA enabled.", host, allVmsOnHost.size()); for (VMInstanceVO vm : allVmsOnHost) { State vmState = vm.getState(); if (vmState == State.Starting || vmState == State.Running || vmState == State.Stopping) { @@ -1647,12 +1645,12 @@ public Host cancelHostAsDegraded(final CancelHostAsDegradedCmd cmd) throws NoTra HostVO host = _hostDao.findById(hostId); if (host == null || host.getRemoved() != null) { - throw new InvalidParameterValueException(String.format("Host [id=%s] does not exist", host.getId())); + throw new InvalidParameterValueException(String.format("Host (%s) with id %d does not exist", host, hostId)); } if (host.getResourceState() != ResourceState.Degraded) { throw new NoTransitionException( - String.format("Cannot perform cancelHostAsDegraded on host [id=%s, name=%s] when host is in %s state", host.getId(), host.getName(), host.getResourceState())); + String.format("Cannot perform cancelHostAsDegraded on host (%s) when host is in %s state", host, host.getResourceState())); } try { @@ -1660,7 +1658,7 @@ public Host cancelHostAsDegraded(final CancelHostAsDegradedCmd cmd) throws NoTra host.setResourceState(ResourceState.Enabled); } catch (NoTransitionException e) { throw new NoTransitionException( - String.format("Cannot transmit host [id=%s, name=%s, state=%s, status=%s] to %s state", host.getId(), host.getName(), host.getResourceState(), host.getStatus(), + String.format("Cannot transmit host [id=%s, uuid=%s, name=%s, state=%s, status=%s] to %s state", host.getId(), host.getUuid(), host.getName(), host.getResourceState(), host.getStatus(), ResourceState.Enabled)); } return host; @@ -1694,11 +1692,11 @@ protected void configureVncAccessForKVMHostFailedMigrations(HostVO host, List errorVms) throws NoTransitionException { - logger.debug("Unable to migrate / fix 
errors for " + errorVms.size() + " VM(s) from host " + host.getUuid()); + logger.debug("Unable to migrate / fix errors for {} VM(s) from host {}", errorVms.size(), host); _haMgr.cancelScheduledMigrations(host); configureVncAccessForKVMHostFailedMigrations(host, errorVms); resourceStateTransitTo(host, ResourceState.Event.UnableToMaintain, _nodeId); @@ -1716,14 +1714,14 @@ protected boolean setHostIntoErrorInMaintenance(HostVO host, List } protected boolean setHostIntoErrorInPrepareForMaintenance(HostVO host, List errorVms) throws NoTransitionException { - logger.debug("Host " + host.getUuid() + " entering in PrepareForMaintenanceWithErrors state"); + logger.debug("Host {} entering in PrepareForMaintenanceWithErrors state", host); configureVncAccessForKVMHostFailedMigrations(host, errorVms); resourceStateTransitTo(host, ResourceState.Event.UnableToMigrate, _nodeId); return false; } protected boolean setHostIntoPrepareForMaintenanceAfterErrorsFixed(HostVO host) throws NoTransitionException { - logger.debug("Host " + host.getUuid() + " entering in PrepareForMaintenance state as any previous corrections have been fixed"); + logger.debug("Host {} entering in PrepareForMaintenance state as any previous corrections have been fixed", host); resourceStateTransitTo(host, ResourceState.Event.ErrorsCorrected, _nodeId); return false; } @@ -2753,8 +2751,8 @@ public HostVO fillRoutingHostVO(final HostVO host, final StartupRoutingCommand s final ClusterVO clusterVO = _clusterDao.findById(host.getClusterId()); if (clusterVO.getHypervisorType() != hyType) { - throw new IllegalArgumentException("Can't add host whose hypervisor type is: " + hyType + " into cluster: " + clusterVO.getId() + - " whose hypervisor type is: " + clusterVO.getHypervisorType()); + throw new IllegalArgumentException(String.format("Can't add host whose hypervisor type is: %s into cluster: %s whose hypervisor type is: %s", + hyType, clusterVO, clusterVO.getHypervisorType())); } CPU.CPUArch hostCpuArch = 
CPU.CPUArch.fromType(ssCmd.getCpuArch()); if (hostCpuArch != null && clusterVO.getArch() != null && hostCpuArch != clusterVO.getArch()) { @@ -2793,7 +2791,7 @@ public HostVO fillRoutingHostVO(final HostVO host, final StartupRoutingCommand s @Override public void deleteRoutingHost(final HostVO host, final boolean isForced, final boolean forceDestroyStorage) throws UnableDeleteHostException { if (host.getType() != Host.Type.Routing) { - throw new CloudRuntimeException(String.format("Non-Routing host gets in deleteRoutingHost, id is %s", host.getId())); + throw new CloudRuntimeException(String.format("Non-Routing host (%s) gets in deleteRoutingHost", host)); } if (logger.isDebugEnabled()) { @@ -2838,7 +2836,7 @@ public void deleteRoutingHost(final HostVO host, final boolean isForced, final b try { resourceStateTransitTo(host, ResourceState.Event.DeleteHost, host.getId()); } catch (final NoTransitionException e) { - logger.debug("Cannot transmit host " + host.getId() + " to Disabled state", e); + logger.debug("Cannot transmit host {} to Disabled state", host, e); } for (final VMInstanceVO vm : vms) { if ((! 
HighAvailabilityManager.ForceHA.value() && !vm.isHaEnabled()) || vm.getState() == State.Stopping) { @@ -2876,7 +2874,7 @@ private boolean doCancelMaintenance(final long hostId) { * really prefer to exception that always exposes bugs */ if (!ResourceState.isMaintenanceState(host.getResourceState())) { - throw new CloudRuntimeException("Cannot perform cancelMaintenance when resource state is " + host.getResourceState() + ", hostId = " + hostId); + throw new CloudRuntimeException(String.format("Cannot perform cancelMaintenance when resource state is %s, host = %s", host.getResourceState(), host)); } /* TODO: move to listener */ @@ -2886,7 +2884,7 @@ private boolean doCancelMaintenance(final long hostId) { final List vms = _haMgr.findTakenMigrationWork(); for (final VMInstanceVO vm : vms) { if (vm.getHostId() != null && vm.getHostId() == hostId) { - logger.warn("Unable to cancel migration because the vm is being migrated: " + vm + ", hostId = " + hostId); + logger.warn("Unable to cancel migration because the vm is being migrated: {}, host {}", vm, host); vms_migrating = true; } } @@ -3137,8 +3135,7 @@ public boolean migrateAwayFailed(final long hostId, final long vmId) { return false; } else { try { - logger.warn("Migration of VM " + _vmDao.findById(vmId) + " failed from host " + _hostDao.findById(hostId) + - ". Emitting event UnableToMigrate."); + logger.warn("Migration of VM {} failed from host {}. 
Emitting event UnableToMigrate.", _vmDao.findById(vmId), host); return resourceStateTransitTo(host, ResourceState.Event.UnableToMigrate, _nodeId); } catch (final NoTransitionException e) { logger.debug(String.format("No next resource state for %s while current state is [%s] with event %s", host, host.getResourceState(), ResourceState.Event.UnableToMigrate), e); @@ -3316,14 +3313,15 @@ public HostVO findHostByName(final String name) { @Override public HostStats getHostStatistics(final long hostId) { - final Answer answer = _agentMgr.easySend(hostId, new GetHostStatsCommand(_hostDao.findById(hostId).getGuid(), _hostDao.findById(hostId).getName(), hostId)); + HostVO host = _hostDao.findById(hostId); + final Answer answer = _agentMgr.easySend(hostId, new GetHostStatsCommand(host.getGuid(), host.getName(), hostId)); if (answer != null && answer instanceof UnsupportedAnswer) { return null; } if (answer == null || !answer.getResult()) { - final String msg = "Unable to obtain host " + hostId + " statistics. "; + final String msg = String.format("Unable to obtain host %s statistics. ", host); logger.warn(msg); return null; } else { @@ -3449,12 +3447,12 @@ public List listAllHostsInOneZoneNotInClusterByHypervisors(List gpuDeviceList = listAvailableGPUDevice(hostId, groupName, vgpuType); if (CollectionUtils.isEmpty(gpuDeviceList)) { - final String errorMsg = "Host " + hostId + " does not have required GPU device or out of capacity. GPU group: " + groupName + ", vGPU Type: " + vgpuType; + final String errorMsg = String.format("Host %s does not have required GPU device or out of capacity. 
GPU group: %s, vGPU Type: %s", _hostDao.findById(hostId), groupName, vgpuType); logger.error(errorMsg); throw new CloudRuntimeException(errorMsg); } @@ -3536,7 +3534,7 @@ public Boolean doInTransaction(final TransactionStatus status) { final PlannerHostReservationVO hostReservation = _plannerHostReserveDao.lockRow(id, true); if (hostReservation == null) { if (logger.isDebugEnabled()) { - logger.debug("Host reservation for host: " + hostId + " does not even exist. Release reservartion call is ignored."); + logger.debug("Host reservation for host: {} does not even exist. Release reservartion call is ignored.", () -> _hostDao.findById(hostId)); } return false; } @@ -3546,7 +3544,7 @@ public Boolean doInTransaction(final TransactionStatus status) { } if (logger.isDebugEnabled()) { - logger.debug("Host reservation for host: " + hostId + " does not even exist. Release reservartion call is ignored."); + logger.debug("Host reservation for host: {} does not even exist. Release reservartion call is ignored.", () -> _hostDao.findById(hostId)); } return false; @@ -3555,7 +3553,7 @@ public Boolean doInTransaction(final TransactionStatus status) { } catch (final CloudRuntimeException e) { throw e; } catch (final Throwable t) { - logger.error("Unable to release host reservation for host: " + hostId, t); + logger.error("Unable to release host reservation for host: {}", _hostDao.findById(hostId), t); return false; } } diff --git a/server/src/main/java/com/cloud/resourceicon/ResourceIconManagerImpl.java b/server/src/main/java/com/cloud/resourceicon/ResourceIconManagerImpl.java index 943c68c7c8dd..6c286edd00dc 100644 --- a/server/src/main/java/com/cloud/resourceicon/ResourceIconManagerImpl.java +++ b/server/src/main/java/com/cloud/resourceicon/ResourceIconManagerImpl.java @@ -181,7 +181,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { Pair accountDomainPair = getAccountDomain(id, resourceType); Long domainId = accountDomainPair.second(); Long accountId = 
accountDomainPair.first(); - resourceManagerUtil.checkResourceAccessible(accountId, domainId, String.format("Account ' %s ' doesn't have permissions to upload icon for resource ' %s ", caller, id)); + resourceManagerUtil.checkResourceAccessible(accountId, domainId, String.format("Account ' %s ' doesn't have permissions to upload icon for resource [id: %s, uuid: %s] ", caller, id, resourceUuid)); if (existingResourceIcon == null) { resourceIcon = new ResourceIconVO(id, resourceType, resourceUuid, base64Image); @@ -221,7 +221,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { Pair accountDomainPair = getAccountDomain(id, resourceType); Long domainId = accountDomainPair.second(); Long accountId = accountDomainPair.first(); - resourceManagerUtil.checkResourceAccessible(accountId, domainId, String.format("Account ' %s ' doesn't have permissions to upload icon for resource ' %s ", caller, id)); + resourceManagerUtil.checkResourceAccessible(accountId, domainId, String.format("Account ' %s ' doesn't have permissions to upload icon for resource [id: %s, uuid: %s]", caller, id, resourceId)); resourceIconDao.remove(resourceIcon.getId()); logger.debug("Removed icon for resources (" + String.join(", ", resourceIds) + ")"); diff --git a/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java b/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java index b59ddc029ee1..f37b661c22f7 100644 --- a/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java +++ b/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java @@ -932,7 +932,7 @@ public ResourceLimitVO updateResourceLimit(Long accountId, Long domainId, Intege if ((caller.getAccountId() == accountId.longValue()) && (_accountMgr.isDomainAdmin(caller.getId()) || caller.getType() == Account.Type.RESOURCE_DOMAIN_ADMIN)) { // If the admin is trying to update their own account, disallow. 
- throw new PermissionDeniedException("Unable to update resource limit for their own account " + accountId + ", permission denied"); + throw new PermissionDeniedException(String.format("Unable to update resource limit for their own account %s, permission denied", account)); } if (account.getType() == Account.Type.PROJECT) { @@ -976,8 +976,7 @@ public ResourceLimitVO updateResourceLimit(Long accountId, Long domainId, Intege DomainVO parentDomain = _domainDao.findById(parentDomainId); long parentMaximum = findCorrectResourceLimitForDomain(parentDomain, resourceType, tag); if ((parentMaximum >= 0) && (max.longValue() > parentMaximum)) { - throw new InvalidParameterValueException("Domain " + domain.getName() + "(id: " + parentDomain.getId() + ") has maximum allowed resource limit " + parentMaximum + " for " - + resourceType + ", please specify a value less than or equal to " + parentMaximum); + throw new InvalidParameterValueException(String.format("Domain %s has maximum allowed resource limit %d for %s, please specify a value less than or equal to %d", parentDomain, parentMaximum, resourceType, parentMaximum)); } } ownerType = ResourceOwnerType.Domain; @@ -1012,7 +1011,7 @@ protected void removeResourceLimitAndCountForNonMatchingTags(Long ownerId, Resou "host tags: %s, storage tags: %s", StringUtils.join(hostTags), StringUtils.join(storageTags)); if (ObjectUtils.allNotNull(ownerId, ownerType)) { - msg = String.format("%s for %s ID: %d", msg, ownerType.getName().toLowerCase(), ownerId); + msg = String.format("%s for %s", msg, ownerType == ResourceOwnerType.Account ? 
_accountDao.findById(ownerId) : _domainDao.findById(ownerId)); } logger.debug(msg); } diff --git a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java index 7926498c1239..0c836d7347db 100644 --- a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java +++ b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java @@ -1395,7 +1395,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { for (ResourceType resourceType : resourceTypes) { if (!domainCountStr.contains(resourceType.toString())) { ResourceCountVO resourceCountVO = new ResourceCountVO(resourceType, 0, domain.getId(), ResourceOwnerType.Domain); - logger.debug("Inserting resource count of type " + resourceType + " for domain id=" + domain.getId()); + logger.debug("Inserting resource count of type {} for domain {}", resourceType, domain); _resourceCountDao.persist(resourceCountVO); } } @@ -1424,7 +1424,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { for (ResourceType resourceType : resourceTypes) { if (!accountCountStr.contains(resourceType.toString())) { ResourceCountVO resourceCountVO = new ResourceCountVO(resourceType, 0, account.getId(), ResourceOwnerType.Account); - logger.debug("Inserting resource count of type " + resourceType + " for account id=" + account.getId()); + logger.debug("Inserting resource count of type {} for account {}", resourceType, account); _resourceCountDao.persist(resourceCountVO); } } diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index 2062ee1e94d4..9c19009a50e8 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -2521,7 +2521,7 @@ public Pair, Integer> searchForIPAddresses(final ListP freeAddrs.addAll(_ipAddressMgr.listAvailablePublicIps(dcId, 
null, vlanDbIds, owner, VlanType.VirtualNetwork, associatedNetworkId, false, false, false, null, null, false, cmd.getVpcId(), cmd.isDisplay(), false, false)); // Free } catch (InsufficientAddressCapacityException e) { - logger.warn("no free address is found in zone " + dcId); + logger.warn("no free address is found in zone {}", dc); } } for (IPAddressVO addr: freeAddrs) { @@ -3056,8 +3056,8 @@ public boolean removeGuestOsMapping(final RemoveGuestOsMappingCmd cmd) { } - protected ConsoleProxyInfo getConsoleProxyForVm(final long dataCenterId, final long userVmId) { - return _consoleProxyMgr.assignProxy(dataCenterId, userVmId); + protected ConsoleProxyInfo getConsoleProxyForVm(final long dataCenterId, final VMInstanceVO userVm) { + return _consoleProxyMgr.assignProxy(dataCenterId, userVm); } private ConsoleProxyVO startConsoleProxy(final long instanceId) { @@ -3092,7 +3092,7 @@ protected ConsoleProxyVO destroyConsoleProxy(final long instanceId) { public String getConsoleAccessUrlRoot(final long vmId) { final VMInstanceVO vm = _vmInstanceDao.findById(vmId); if (vm != null) { - final ConsoleProxyInfo proxy = getConsoleProxyForVm(vm.getDataCenterId(), vmId); + final ConsoleProxyInfo proxy = getConsoleProxyForVm(vm.getDataCenterId(), vm); if (proxy != null) { return proxy.getProxyImageUrl(); } @@ -3106,7 +3106,7 @@ public Pair setConsoleAccessForVm(long vmId, String sessionUuid if (vm == null) { return new Pair<>(false, "Cannot find a VM with id = " + vmId); } - final ConsoleProxyInfo proxy = getConsoleProxyForVm(vm.getDataCenterId(), vmId); + final ConsoleProxyInfo proxy = getConsoleProxyForVm(vm.getDataCenterId(), vm); if (proxy == null) { return new Pair<>(false, "Cannot find a console proxy for the VM " + vmId); } @@ -3137,7 +3137,7 @@ public Pair setConsoleAccessForVm(long vmId, String sessionUuid public String getConsoleAccessAddress(long vmId) { final VMInstanceVO vm = _vmInstanceDao.findById(vmId); if (vm != null) { - final ConsoleProxyInfo proxy = 
getConsoleProxyForVm(vm.getDataCenterId(), vmId); + final ConsoleProxyInfo proxy = getConsoleProxyForVm(vm.getDataCenterId(), vm); return proxy != null ? proxy.getProxyAddress() : null; } return null; @@ -5028,8 +5028,8 @@ public String getVMPassword(GetVMPasswordCmd cmd) { String password = vm.getDetail("Encrypted.Password"); if (StringUtils.isEmpty(password)) { - throw new InvalidParameterValueException(String.format("No password found for VM with id [%s]. When the VM's SSH keypair is changed, the current encrypted password is " - + "removed due to incosistency in the encryptation, as the new SSH keypair is different from which the password was encrypted. To get a new password, it must be reseted.", vmId)); + throw new InvalidParameterValueException(String.format("No password found for VM [%s]. When the VM's SSH keypair is changed, the current encrypted password is " + + "removed due to inconsistency in the encryption, as the new SSH keypair is different from which the password was encrypted. 
To get a new password, it must be reset.", vm)); } return password; @@ -5049,7 +5049,7 @@ private boolean updateHostsInCluster(final UpdateHostPasswordCmd command) { public void doInTransactionWithoutResult(final TransactionStatus status) { for (final HostVO h : hosts) { if (logger.isDebugEnabled()) { - logger.debug("Changing password for host name = " + h.getName()); + logger.debug("Changing password for host {}", h); } // update password for this host final DetailVO nv = _detailsDao.findDetail(h.getId(), ApiConstants.USERNAME); @@ -5116,7 +5116,7 @@ public boolean updateHostPassword(final UpdateHostPasswordCmd cmd) { @Override public void doInTransactionWithoutResult(final TransactionStatus status) { if (logger.isDebugEnabled()) { - logger.debug("Changing password for host name = " + host.getName()); + logger.debug("Changing password for host {}", host); } // update password for this host final DetailVO nv = _detailsDao.findDetail(host.getId(), ApiConstants.USERNAME); diff --git a/server/src/main/java/com/cloud/server/StatsCollector.java b/server/src/main/java/com/cloud/server/StatsCollector.java index 939781f39f71..092e799eb2ca 100644 --- a/server/src/main/java/com/cloud/server/StatsCollector.java +++ b/server/src/main/java/com/cloud/server/StatsCollector.java @@ -656,7 +656,7 @@ protected void runInContext() { metrics.put(hostStatsEntry.getHostId(), hostStatsEntry); _hostStats.put(host.getId(), hostStatsEntry); } else { - logger.warn("The Host stats is null for host: " + host.getId()); + logger.warn("The Host stats is null for host: {}", host); } } @@ -1251,7 +1251,7 @@ protected void runInContext() { metrics.clear(); } } catch (Exception e) { - logger.debug("Failed to get VM stats for host with ID: " + host.getId()); + logger.debug("Failed to get VM stats for host with ID: {}", host); continue; } } @@ -1471,8 +1471,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } if (vmDiskStat_lock == null) { - logger.warn("unable to find vm disk 
stats from host for account: " + vm.getAccountId() + " with vmId: " + vm.getId() - + " and volumeId:" + volume.getId()); + logger.warn("unable to find vm disk stats from host for account: {} with vm: {} and volume:{}", vm.getAccountId(), vm, volume); continue; } @@ -1518,7 +1517,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } }); } catch (Exception e) { - logger.warn(String.format("Error while collecting vm disk stats from host %s : ", host.getName()), e); + logger.warn("Error while collecting vm disk stats from host {} : ", host, e); } } } @@ -1560,8 +1559,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { logger.debug("Cannot find uservm with id: " + vmId + " , continue"); continue; } - logger.debug("Now we are updating the user_statistics table for VM: " + userVm.getInstanceName() - + " after collecting vm network statistics from host: " + host.getName()); + logger.debug("Now we are updating the user_statistics table for VM: {} after collecting vm network statistics from host: {}", userVm, host); for (VmNetworkStats vmNetworkStat : vmNetworkStats) { VmNetworkStatsEntry vmNetworkStatEntry = (VmNetworkStatsEntry)vmNetworkStat; SearchCriteria sc_nic = _nicDao.createSearchCriteria(); @@ -1586,8 +1584,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } if (vmNetworkStat_lock == null) { - logger.warn("unable to find vm network stats from host for account: " + userVm.getAccountId() + " with vmId: " + userVm.getId() - + " and nicId:" + nic.getId()); + logger.warn("unable to find vm network stats from host for account: {} with vm: {} and nic: {}", userVm.getAccountId(), userVm, nic); continue; } @@ -1623,7 +1620,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } }); } catch (Exception e) { - logger.warn(String.format("Error while collecting vm network stats from host %s : ", host.getName()), e); + logger.warn("Error while collecting vm network stats from host {} : ", host, 
e); } } } @@ -1709,7 +1706,7 @@ protected void runInContext() { Answer answer = ssAhost.sendMessage(command); if (answer != null && answer.getResult()) { storageStats.put(storeId, (StorageStats)answer); - logger.trace("HostId: " + storeId + " Used: " + toHumanReadableSize(((StorageStats)answer).getByteUsed()) + " Total Available: " + toHumanReadableSize(((StorageStats)answer).getCapacityBytes())); + logger.trace("Store: {} Used: {} Total Available: {}", store, toHumanReadableSize(((StorageStats) answer).getByteUsed()), toHumanReadableSize(((StorageStats) answer).getCapacityBytes())); } } updateStorageStats(storageStats); @@ -1738,8 +1735,8 @@ protected void runInContext() { pool.setCapacityBytes(capacityBytes); poolNeedsUpdating = true; } else { - logger.warn("Not setting capacity bytes, received {} capacity for pool ID {}", - NumbersUtil.toReadableSize(((StorageStats)answer).getCapacityBytes()), poolId); + logger.warn("Not setting capacity bytes, received {} capacity for pool {}", + NumbersUtil.toReadableSize(((StorageStats)answer).getCapacityBytes()), pool); } } if (((_storagePoolStats.get(poolId) != null && _storagePoolStats.get(poolId).getByteUsed() != usedBytes) @@ -1859,12 +1856,12 @@ public boolean imageStoreHasEnoughCapacity(DataStore imageStore) { String readableTotalCapacity = NumbersUtil.toReadableSize((long) totalCapacity); String readableUsedCapacity = NumbersUtil.toReadableSize((long) usedCapacity); - logger.printf(Level.DEBUG, "Verifying image storage [%s]. Capacity: total=[%s], used=[%s], threshold=[%.2f%%].", imageStoreId, readableTotalCapacity, readableUsedCapacity, threshold * 100); + logger.printf(Level.DEBUG, "Verifying image storage [%s]. Capacity: total=[%s], used=[%s], threshold=[%.2f%%].", imageStore, readableTotalCapacity, readableUsedCapacity, threshold * 100); if (usedCapacity / totalCapacity <= threshold) { return true; } - logger.printf(Level.WARN, "Image storage [%s] has not enough capacity. 
Capacity: total=[%s], used=[%s], threshold=[%.2f%%].", imageStoreId, readableTotalCapacity, readableUsedCapacity, threshold * 100); + logger.printf(Level.WARN, "Image storage [%s] has not enough capacity. Capacity: total=[%s], used=[%s], threshold=[%.2f%%].", imageStore, readableTotalCapacity, readableUsedCapacity, threshold * 100); return false; } @@ -1891,7 +1888,7 @@ public boolean imageStoreHasEnoughCapacity(DataStore imageStore, Double storeCap * Sends VMs metrics to the configured graphite host. */ protected void sendVmMetricsToGraphiteHost(Map metrics, HostVO host) { - logger.debug(String.format("Sending VmStats of host %s to %s host %s:%s", host.getId(), externalStatsType, externalStatsHost, externalStatsPort)); + logger.debug("Sending VmStats of host {} to {} host {}:{}", host, externalStatsType, externalStatsHost, externalStatsPort); try { GraphiteClient g = new GraphiteClient(externalStatsHost, externalStatsPort); g.sendMetrics(metrics); diff --git a/server/src/main/java/com/cloud/servlet/ConsoleProxyServlet.java b/server/src/main/java/com/cloud/servlet/ConsoleProxyServlet.java index ad884a33406e..2b786a8f1efe 100644 --- a/server/src/main/java/com/cloud/servlet/ConsoleProxyServlet.java +++ b/server/src/main/java/com/cloud/servlet/ConsoleProxyServlet.java @@ -209,14 +209,14 @@ private void handleThumbnailRequest(HttpServletRequest req, HttpServletResponse } if (vm.getHostId() == null) { - LOGGER.warn("VM " + vmId + " lost host info, sending blank response for thumbnail request"); + LOGGER.warn("VM {} lost host info, sending blank response for thumbnail request", vm); sendResponse(resp, ""); return; } HostVO host = _ms.getHostBy(vm.getHostId()); if (host == null) { - LOGGER.warn("VM " + vmId + "'s host does not exist, sending blank response for thumbnail request"); + LOGGER.warn("VM {}'s host does not exist, sending blank response for thumbnail request", vm); sendResponse(resp, ""); return; } @@ -263,14 +263,14 @@ private void 
handleAuthRequest(HttpServletRequest req, HttpServletResponse resp, } if (vm.getHostId() == null) { - LOGGER.warn("VM " + vmId + " lost host info, failed response for authentication request from console proxy"); + LOGGER.warn("VM {} lost host info, failed response for authentication request from console proxy", vm); sendResponse(resp, "failed"); return; } HostVO host = _ms.getHostBy(vm.getHostId()); if (host == null) { - LOGGER.warn("VM " + vmId + "'s host does not exist, sending failed response for authentication request from console proxy"); + LOGGER.warn("VM {}'s host does not exist, sending failed response for authentication request from console proxy", vm); sendResponse(resp, "failed"); return; } @@ -434,14 +434,17 @@ private boolean checkSessionPermision(HttpServletRequest req, long vmId, Account } catch (PermissionDeniedException ex) { if (_accountMgr.isNormalUser(accountObj.getId())) { if (LOGGER.isDebugEnabled()) { - LOGGER.debug("VM access is denied. VM owner account " + vm.getAccountId() + " does not match the account id in session " + - accountObj.getId() + " and caller is a normal user"); + LOGGER.debug("VM access is denied. VM owner account {} does not " + + "match the account id in session {} and caller is a normal user", + _accountMgr.getAccount(vm.getAccountId()), accountObj); } } else if (_accountMgr.isDomainAdmin(accountObj.getId()) || accountObj.getType() == Account.Type.READ_ONLY_ADMIN) { if(LOGGER.isDebugEnabled()) { - LOGGER.debug("VM access is denied. VM owner account " + vm.getAccountId() - + " does not match the account id in session " + accountObj.getId() + " and the domain-admin caller does not manage the target domain"); + LOGGER.debug("VM access is denied. 
VM owner account {} does not " + + "match the account id in session {} and the domain-admin caller " + + "does not manage the target domain", + _accountMgr.getAccount(vm.getAccountId()), accountObj); } } return false; @@ -479,7 +482,7 @@ public boolean verifyUser(Long userId) { if ((user == null) || (user.getRemoved() != null) || !user.getState().equals(Account.State.ENABLED) || (account == null) || !account.getState().equals(Account.State.ENABLED)) { - LOGGER.warn("Deleted/Disabled/Locked user with id=" + userId + " attempting to access public API"); + LOGGER.warn("Deleted/Disabled/Locked user ({}) with id={} attempting to access public API", user, userId); return false; } return true; @@ -545,15 +548,14 @@ private boolean verifyRequest(Map requestParameters) { Account account = userAcctPair.second(); if (!user.getState().equals(Account.State.ENABLED) || !account.getState().equals(Account.State.ENABLED)) { - LOGGER.debug("disabled or locked user accessing the api, userid = " + user.getId() + "; name = " + user.getUsername() + "; state: " + user.getState() + - "; accountState: " + account.getState()); + LOGGER.debug("disabled or locked user accessing the api, user: {}; state: {}; accountState: {}", user, user.getState(), account.getState()); return false; } // verify secret key exists secretKey = user.getSecretKey(); if (secretKey == null) { - LOGGER.debug("User does not have a secret key associated with the account -- ignoring request, username: " + user.getUsername()); + LOGGER.debug("User does not have a secret key associated with the account -- ignoring request, user: {}", user); return false; } diff --git a/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java b/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java index 2a6494cffcdb..67f2e0ab7a42 100644 --- a/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java @@ -123,17 +123,17 @@ public 
MigrationResponse migrateData(MigrateSecondaryStorageDataCmd cmd) { continue; } if (store.isReadonly()) { - logger.warn("Secondary storage: "+ id + " cannot be considered for migration as has read-only permission, Skipping it... "); + logger.warn("Secondary storage: {} cannot be considered for migration as has read-only permission, Skipping it... ", store); continue; } if (!store.getProviderName().equals(DataStoreProvider.NFS_IMAGE)) { - logger.warn("Destination image store : " + store.getName() + " not NFS based. Store not suitable for migration!"); + logger.warn("Destination image store : {} not NFS based. Store not suitable for migration!", store); continue; } if (srcStoreDcId != null && store.getDataCenterId() != null && !srcStoreDcId.equals(store.getDataCenterId())) { - logger.warn("Source and destination stores are not in the same zone. Skipping destination store: " + store.getName()); + logger.warn("Source and destination stores are not in the same zone. Skipping destination store: {}", store); continue; } diff --git a/server/src/main/java/com/cloud/storage/OCFS2ManagerImpl.java b/server/src/main/java/com/cloud/storage/OCFS2ManagerImpl.java index bbd2a506e4cb..0845af7b293b 100644 --- a/server/src/main/java/com/cloud/storage/OCFS2ManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/OCFS2ManagerImpl.java @@ -105,11 +105,11 @@ private boolean prepareNodes(String clusterName, List hosts) { for (HostVO h : hosts) { Answer ans = _agentMgr.easySend(h.getId(), cmd); if (ans == null) { - logger.debug("Host " + h.getId() + " is not in UP state, skip preparing OCFS2 node on it"); + logger.debug("Host {} is not in UP state, skip preparing OCFS2 node on it", h); continue; } if (!ans.getResult()) { - logger.warn("PrepareOCFS2NodesCommand failed on host " + h.getId() + " " + ans.getDetails()); + logger.warn("PrepareOCFS2NodesCommand failed on host {} {}", h, ans.getDetails()); return false; } } @@ -150,7 +150,7 @@ public boolean prepareNodes(Long clusterId) { 
sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing); List hosts = sc.list(); if (hosts.isEmpty()) { - logger.debug("There is no host in cluster " + clusterId + ", no need to prepare OCFS2 nodes"); + logger.debug("There is no host in cluster {}, no need to prepare OCFS2 nodes", cluster); return true; } @@ -178,8 +178,8 @@ public void processDeleteHostEventBefore(Host host) { @Override public void processDeletHostEventAfter(Host host) { String errMsg = - String.format("Prepare OCFS2 nodes failed after delete host %1$s (zone:%2$s, pod:%3$s, cluster:%4$s", host.getId(), host.getDataCenterId(), host.getPodId(), - host.getClusterId()); + String.format("Prepare OCFS2 nodes failed after delete host %s (zone: %s, pod: %s, cluster: %s", + host, host.getDataCenterId(), host.getPodId(), host.getClusterId()); if (host.getHypervisorType() != HypervisorType.Ovm) { return; diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index b5a206af2892..78aa82b43fef 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -1783,7 +1783,7 @@ public void cleanupStorage(boolean recurring) { logger.debug(String.format("Did not find snapshot [%s] in destroying state in %s data store ID: %d.", snapshotUuid, storeRole, snapshotDataStoreVO.getDataStoreId())); } } catch (Exception e) { - logger.error("Failed to delete snapshot [{}] from storage due to: [{}].", snapshotDataStoreVO, e.getMessage()); + logger.error("Failed to delete snapshot [{}] from storage due to: [{}].", snapshot, e.getMessage()); if (logger.isDebugEnabled()) { logger.debug("Failed to delete snapshot [{}] from storage.", snapshot, e); } diff --git a/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java b/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java index f1c7c38b8dc6..60494dcb05c0 100644 --- 
a/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java +++ b/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java @@ -128,8 +128,7 @@ public boolean maintain(DataStore store) { for (StoragePoolVO sp : spes) { if (sp.getParent() != pool.getParent() && sp.getId() != pool.getParent()) { // If Datastore cluster is tried to prepare for maintenance then child storage pools are also kept in PrepareForMaintenance mode if (sp.getStatus() == StoragePoolStatus.PrepareForMaintenance) { - throw new CloudRuntimeException("Only one storage pool in a cluster can be in PrepareForMaintenance mode, " + sp.getId() + - " is already in PrepareForMaintenance mode "); + throw new CloudRuntimeException(String.format("Only one storage pool in a cluster can be in PrepareForMaintenance mode, %s is already in PrepareForMaintenance mode ", sp)); } } } @@ -172,7 +171,7 @@ public boolean maintain(DataStore store) { logger.debug("ModifyStoragePool false succeeded"); } if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster) { - logger.debug(String.format("Started synchronising datastore cluster storage pool %s with vCenter", pool.getUuid())); + logger.debug("Started synchronising datastore cluster storage pool {} with vCenter", pool); storageManager.syncDatastoreClusterStoragePool(pool.getId(), ((ModifyStoragePoolAnswer) answer).getDatastoreClusterChildren(), host.getId()); } } @@ -348,7 +347,7 @@ public boolean cancelMaintain(DataStore store) { logger.debug("ModifyStoragePool add succeeded"); } if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster) { - logger.debug(String.format("Started synchronising datastore cluster storage pool %s with vCenter", pool.getUuid())); + logger.debug("Started synchronising datastore cluster storage pool {} with vCenter", pool); storageManager.syncDatastoreClusterStoragePool(pool.getId(), ((ModifyStoragePoolAnswer) answer).getDatastoreClusterChildren(), host.getId()); } } diff --git 
a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 689d159905fa..3657f675fb97 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -845,7 +845,7 @@ public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationExcept } if (snapshotCheck.getState() != Snapshot.State.BackedUp) { - throw new InvalidParameterValueException("Snapshot id=" + snapshotId + " is not in " + Snapshot.State.BackedUp + " state yet and can't be used for volume creation"); + throw new InvalidParameterValueException(String.format("Snapshot %s is not in %s state yet and can't be used for volume creation", snapshotCheck, Snapshot.State.BackedUp)); } SnapshotDataStoreVO snapshotStore = _snapshotDataStoreDao.findOneBySnapshotAndDatastoreRole(snapshotId, DataStoreRole.Primary); @@ -922,7 +922,7 @@ public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationExcept // Check if zone is disabled if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getId())) { - throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId); + throw new PermissionDeniedException(String.format("Cannot perform this operation, Zone is currently disabled: %s", zone)); } // If local storage is disabled then creation of volume with local disk @@ -1054,10 +1054,11 @@ public VolumeVO createVolume(CreateVolumeCmd cmd) { created = false; VolumeInfo vol = volFactory.getVolume(cmd.getEntityId()); vol.stateTransit(Volume.Event.DestroyRequested); - throw new CloudRuntimeException("Failed to create volume: " + volume.getUuid(), e); + throw new CloudRuntimeException(String.format("Failed to create volume: %s", volume), e); } finally { if (!created) { - logger.trace("Decrementing volume resource count for account id=" + 
volume.getAccountId() + " as volume failed to create on the backend"); + VolumeVO finalVolume = volume; + logger.trace("Decrementing volume resource count for account {} as volume failed to create on the backend", () -> _accountMgr.getAccount(finalVolume.getAccountId())); _resourceLimitMgr.decrementVolumeResourceCount(volume.getAccountId(), cmd.getDisplayVolume(), volume.getSize(), _diskOfferingDao.findByIdIncludingRemoved(volume.getDiskOfferingId())); } @@ -1595,7 +1596,7 @@ private boolean deleteVolumeFromStorage(VolumeVO volume, Account caller) throws cleanVolumesCache(volume); return true; } catch (InterruptedException | ExecutionException e) { - logger.warn("Failed to expunge volume: " + volume.getUuid(), e); + logger.warn("Failed to expunge volume: {}", volume, e); return false; } } @@ -1678,14 +1679,13 @@ protected void expungeVolumesInSecondaryStorageIfNeeded(VolumeVO volume) throws private void expungeVolumesInPrimaryOrSecondary(VolumeVO volume, DataStoreRole role) throws InterruptedException, ExecutionException { if (!canAccessVolumeStore(volume, role)) { - logger.debug(String.format("Cannot access the storage pool with role: %s " + - "for the volume: %s, skipping expunge from storage", - role.name(), volume.getName())); + logger.debug("Cannot access the storage pool with role: {} " + + "for the volume: {}, skipping expunge from storage", role.name(), volume); return; } VolumeInfo volOnStorage = volFactory.getVolume(volume.getId(), role); if (volOnStorage != null) { - logger.info("Expunging volume " + volume.getId() + " from " + role + " data store"); + logger.info("Expunging volume {} from {} data store", volume, role); AsyncCallFuture future = volService.expungeVolumeAsync(volOnStorage); VolumeApiResult result = future.get(); if (result.isFailed()) { @@ -1722,7 +1722,7 @@ protected void cleanVolumesCache(VolumeVO volume) { return; } for (VolumeInfo volOnCache : cacheVols) { - logger.info("Delete volume from image cache store: " + 
volOnCache.getDataStore().getName()); + logger.info("Delete volume from image cache store: {}", volOnCache.getDataStore()); volOnCache.delete(); } } @@ -1773,7 +1773,7 @@ public Volume destroyVolume(long volumeId, Account caller, boolean expunge, bool stateTransitTo(volume, Volume.Event.DestroyRequested); stateTransitTo(volume, Volume.Event.OperationSucceeded); } catch (NoTransitionException e) { - logger.debug("Failed to destroy volume" + volume.getId(), e); + logger.debug("Failed to destroy volume {}", volume, e); return null; } _resourceLimitMgr.decrementVolumeResourceCount(volume.getAccountId(), volume.isDisplay(), @@ -1781,7 +1781,7 @@ public Volume destroyVolume(long volumeId, Account caller, boolean expunge, bool return volume; } if (!deleteVolumeFromStorage(volume, caller)) { - logger.warn("Failed to expunge volume: " + volumeId); + logger.warn("Failed to expunge volume: {}", volume); return null; } removeVolume(volume.getId()); @@ -1806,7 +1806,7 @@ public Volume recoverVolume(long volumeId) { final VolumeVO volume = _volsDao.findById(volumeId); if (volume == null) { - throw new InvalidParameterValueException("Unable to find a volume with id " + volume); + throw new InvalidParameterValueException(String.format("Unable to find a volume with id %d", volumeId)); } // When trying to expunge, permission is denied when the caller is not an admin and the AllowUserExpungeRecoverVolume is false for the caller. 
@@ -1831,8 +1831,8 @@ public Volume recoverVolume(long volumeId) { _volsDao.detachVolume(volume.getId()); stateTransitTo(volume, Volume.Event.RecoverRequested); } catch (NoTransitionException e) { - logger.debug("Failed to recover volume" + volume.getId(), e); - throw new CloudRuntimeException("Failed to recover volume" + volume.getId(), e); + logger.debug("Failed to recover volume {}", volume, e); + throw new CloudRuntimeException(String.format("Failed to recover volume %s", volume), e); } _resourceLimitMgr.incrementVolumeResourceCount(volume.getAccountId(), volume.isDisplay(), volume.getSize(), _diskOfferingDao.findById(volume.getDiskOfferingId())); @@ -1856,7 +1856,7 @@ public void publishVolumeCreationUsageEvent(Volume volume) { .publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), offeringId, volume.getTemplateId(), volume.getSize(), Volume.class.getName(), volume.getUuid(), volume.isDisplay()); - logger.debug(String.format("Volume [%s] has been successfully recovered, thus a new usage event %s has been published.", volume.getUuid(), EventTypes.EVENT_VOLUME_CREATE)); + logger.debug("Volume [{}] has been successfully recovered, thus a new usage event {} has been published.", volume, EventTypes.EVENT_VOLUME_CREATE); } @Override @@ -2071,7 +2071,7 @@ public Volume changeDiskOfferingForVolumeInternal(Long volumeId, Long newDiskOff /* If this volume has never been beyond allocated state, short circuit everything and simply update the database. */ // We need to publish this event to usage_volume table if (volume.getState() == Volume.State.Allocated) { - logger.debug(String.format("Volume %s is in the allocated state, but has never been created. Simply updating database with new size and IOPS.", volume.getUuid())); + logger.debug("Volume {} is in the allocated state, but has never been created. 
Simply updating database with new size and IOPS.", volume); volume.setSize(newSize); volume.setMinIops(newMinIops); @@ -2121,12 +2121,12 @@ public Volume changeDiskOfferingForVolumeInternal(Long volumeId, Long newDiskOff if (volumeMigrateRequired) { if (CollectionUtils.isEmpty(poolsPair.first()) && CollectionUtils.isEmpty(poolsPair.second())) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Volume change offering operation failed for volume ID: %s as no suitable pool(s) found for migrating to support new disk offering", volume.getUuid())); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Volume change offering operation failed for volume: %s as no suitable pool(s) found for migrating to support new disk offering", volume)); } final Long newSizeFinal = newSize; List suitableStoragePoolsWithEnoughSpace = suitableStoragePools.stream().filter(pool -> storageMgr.storagePoolHasEnoughSpaceForResize(pool, 0L, newSizeFinal)).collect(Collectors.toList()); if (CollectionUtils.isEmpty(suitableStoragePoolsWithEnoughSpace)) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Volume change offering operation failed for volume ID: %s as no suitable pool(s) with enough space found for volume migration.", volume.getUuid())); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Volume change offering operation failed for volume: %s as no suitable pool(s) with enough space found for volume migration.", volume)); } Collections.shuffle(suitableStoragePoolsWithEnoughSpace); MigrateVolumeCmd migrateVolumeCmd = new MigrateVolumeCmd(volume.getId(), suitableStoragePoolsWithEnoughSpace.get(0).getId(), newDiskOffering.getId(), true); @@ -2134,10 +2134,10 @@ public Volume changeDiskOfferingForVolumeInternal(Long volumeId, Long newDiskOff Volume result = migrateVolume(migrateVolumeCmd); volume = (result != null) ? 
_volsDao.findById(result.getId()) : null; if (volume == null) { - throw new CloudRuntimeException(String.format("Volume change offering operation failed for volume ID: %s migration failed to storage pool %s", volume.getUuid(), suitableStoragePools.get(0).getId())); + throw new CloudRuntimeException(String.format("Volume change offering operation failed for volume: %s migration failed to storage pool %s", volume, suitableStoragePools.get(0))); } } catch (Exception e) { - throw new CloudRuntimeException(String.format("Volume change offering operation failed for volume ID: %s migration failed to storage pool %s due to %s", volume.getUuid(), suitableStoragePools.get(0).getId(), e.getMessage())); + throw new CloudRuntimeException(String.format("Volume change offering operation failed for volume: %s migration failed to storage pool %s due to %s", volume, suitableStoragePools.get(0), e.getMessage())); } } @@ -2464,13 +2464,9 @@ private Volume orchestrateAttachVolumeToVM(Long vmId, Long volumeId, Long device } } if (logger.isTraceEnabled()) { - String msg = "attaching volume %s/%s to a VM (%s/%s) with an existing volume %s/%s on primary storage %s"; if (existingVolumeOfVm != null) { - logger.trace(String.format(msg, - volumeToAttach.getName(), volumeToAttach.getUuid(), - vm.getName(), vm.getUuid(), - existingVolumeOfVm.getName(), existingVolumeOfVm.getUuid(), - existingVolumeOfVm.getPoolId())); + logger.trace("attaching volume {} to a VM {} with an existing volume {} on primary storage {}", + volumeToAttach, vm, existingVolumeOfVm, _storagePoolDao.findById(existingVolumeOfVm.getPoolId())); } } @@ -2484,7 +2480,7 @@ private Volume orchestrateAttachVolumeToVM(Long vmId, Long volumeId, Long device if (existingVolumeOfVm != null && !existingVolumeOfVm.getState().equals(Volume.State.Allocated)) { destPrimaryStorage = _storagePoolDao.findById(existingVolumeOfVm.getPoolId()); if (logger.isTraceEnabled() && destPrimaryStorage != null) { - logger.trace(String.format("decided on 
target storage: %s/%s", destPrimaryStorage.getName(), destPrimaryStorage.getUuid())); + logger.trace("decided on target storage: {}", destPrimaryStorage); } } @@ -2567,8 +2563,8 @@ public Volume attachVolumeToVM(Long vmId, Long volumeId, Long deviceId, Boolean StoragePoolVO volumeToAttachStoragePool = _storagePoolDao.findById(volumeToAttach.getPoolId()); if (logger.isTraceEnabled() && volumeToAttachStoragePool != null) { - logger.trace(String.format("volume to attach (%s/%s) has a primary storage assigned to begin with (%s/%s)", - volumeToAttach.getName(), volumeToAttach.getUuid(), volumeToAttachStoragePool.getName(), volumeToAttachStoragePool.getUuid())); + logger.trace("volume to attach {} has a primary storage assigned to begin with {}", + volumeToAttach, volumeToAttachStoragePool); } checkForMatchingHypervisorTypesIf(volumeToAttachStoragePool != null && !volumeToAttachStoragePool.isManaged(), rootDiskHyperType, volumeToAttachHyperType); @@ -2578,12 +2574,8 @@ public Volume attachVolumeToVM(Long vmId, Long volumeId, Long deviceId, Boolean AsyncJob job = asyncExecutionContext.getJob(); if (logger.isInfoEnabled()) { - logger.info(String.format("Trying to attach volume [%s/%s] to VM instance [%s/%s], update async job-%s progress status", - volumeToAttach.getName(), - volumeToAttach.getUuid(), - vm.getName(), - vm.getUuid(), - job.getId())); + logger.info("Trying to attach volume [{}] to VM instance [{}], update async job-{} [{}] progress status", + volumeToAttach, vm, job.getId(), job); } DiskOfferingVO diskOffering = _diskOfferingDao.findById(volumeToAttach.getDiskOfferingId()); @@ -3005,10 +2997,10 @@ public Volume detachVolumeFromVM(DetachVolumeCmd cmmd) { AsyncJob job = asyncExecutionContext.getJob(); if (logger.isInfoEnabled()) { - logger.info(String.format("Trying to attach volume %s to VM instance %s, update async job-%s progress status", - ReflectionToStringBuilderUtils.reflectOnlySelectedFields(volume, "name", "uuid"), - 
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(vm, "name", "uuid"), - job.getId())); + logger.info("Trying to attach volume {} to VM instance {}, update async job-{} progress status", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields(volume, "id", "name", "uuid"), + ReflectionToStringBuilderUtils.reflectOnlySelectedFields(vm, "id", "name", "uuid"), + job.getId()); } _jobMgr.updateAsyncJobAttachment(job.getId(), "Volume", volumeId); @@ -3144,7 +3136,7 @@ private Volume orchestrateDetachVolumeFromVM(long vmId, long volumeId) { volumeVO.setPoolId(storagePoolVO.getId()); _volsDao.update(volumeVO.getId(), volumeVO); } else { - logger.warn(String.format("Unable to find datastore %s while updating the new datastore of the volume %d", datastoreName, volumeId)); + logger.warn("Unable to find datastore {} while updating the new datastore of the volume {}", datastoreName, volume); } } @@ -3228,19 +3220,19 @@ private void handleTargetsForVMware(long hostId, String storageAddress, int stor cmd.setAdd(false); cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC); - sendModifyTargetsCommand(cmd, hostId); + sendModifyTargetsCommand(cmd, host); } } - private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { - Answer answer = _agentMgr.easySend(hostId, cmd); + private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, Host host) { + Answer answer = _agentMgr.easySend(host.getId(), cmd); if (answer == null) { String msg = "Unable to get an answer to the modify targets command"; logger.warn(msg); } else if (!answer.getResult()) { - String msg = "Unable to modify target on the following host: " + hostId; + String msg = String.format("Unable to modify target on the following host: %s", host); logger.warn(msg); } @@ -3555,11 +3547,10 @@ protected void validateConditionsToReplaceDiskOfferingOfVolume(VolumeVO volume, if (volume.getSize() != newDiskOffering.getDiskSize()) { DiskOfferingVO oldDiskOffering = 
this._diskOfferingDao.findById(volume.getDiskOfferingId()); - logger.warn(String.format( - "You are migrating a volume [id=%s] and changing the disk offering[from id=%s to id=%s] to reflect this migration. However, the sizes of the volume and the new disk offering are different.", - volume.getUuid(), oldDiskOffering.getUuid(), newDiskOffering.getUuid())); + logger.warn("You are migrating a volume [{}] and changing the disk offering[from {} to {}] to reflect this migration. However, the sizes of the volume and the new disk offering are different.", + volume, oldDiskOffering, newDiskOffering); } - logger.info(String.format("Changing disk offering to [uuid=%s] while migrating volume [uuid=%s, name=%s].", newDiskOffering.getUuid(), volume.getUuid(), volume.getName())); + logger.info("Changing disk offering to [{}] while migrating volume [{}].", newDiskOffering, volume); } /** @@ -3736,7 +3727,7 @@ private Snapshot takeSnapshotInternal(Long volumeId, Long policyId, Long snapsho _accountMgr.checkAccess(caller, null, true, volume); if (volume.getState() != Volume.State.Ready) { - throw new InvalidParameterValueException("VolumeId: " + volumeId + " is not in " + Volume.State.Ready + " state but " + volume.getState() + ". Cannot take snapshot."); + throw new InvalidParameterValueException(String.format("Volume: %s is not in %s state but %s. Cannot take snapshot.", volume.getVolume(), Volume.State.Ready, volume.getState())); } StoragePoolVO storagePoolVO = _storagePoolDao.findById(volume.getPoolId()); @@ -3817,7 +3808,7 @@ private Snapshot orchestrateTakeVolumeSnapshot(Long volumeId, Long policyId, Lon } if (volume.getState() != Volume.State.Ready) { - throw new InvalidParameterValueException("VolumeId: " + volumeId + " is not in " + Volume.State.Ready + " state but " + volume.getState() + ". Cannot take snapshot."); + throw new InvalidParameterValueException(String.format("Volume: %s is not in %s state but %s. 
Cannot take snapshot.", volume.getVolume(), Volume.State.Ready, volume.getState())); } boolean isSnapshotOnStorPoolOnly = volume.getStoragePoolType() == StoragePoolType.StorPool && BooleanUtils.toBoolean(_configDao.getValue("sp.bypass.secondary.storage")); @@ -3869,11 +3860,11 @@ public Snapshot allocSnapshot(Long volumeId, Long policyId, String snapshotName, } if (volume.getState() != Volume.State.Ready) { - throw new InvalidParameterValueException("VolumeId: " + volumeId + " is not in " + Volume.State.Ready + " state but " + volume.getState() + ". Cannot take snapshot."); + throw new InvalidParameterValueException(String.format("Volume: %s is not in %s state but %s. Cannot take snapshot.", volume.getVolume(), Volume.State.Ready, volume.getState())); } if (ImageFormat.DIR.equals(volume.getFormat())) { - throw new InvalidParameterValueException("Snapshot not supported for volume:" + volumeId); + throw new InvalidParameterValueException(String.format("Snapshot not supported for volume: %s", volume.getVolume())); } if (volume.getTemplateId() != null) { VMTemplateVO template = _templateDao.findById(volume.getTemplateId()); @@ -3883,7 +3874,7 @@ public Snapshot allocSnapshot(Long volumeId, Long policyId, String snapshotName, userVmVO = _userVmDao.findById(instanceId); } if (!isOperationSupported(template, userVmVO)) { - throw new InvalidParameterValueException("VolumeId: " + volumeId + " is for System VM , Creating snapshot against System VM volumes is not supported"); + throw new InvalidParameterValueException(String.format("Volume: %s is for System VM , Creating snapshot against System VM volumes is not supported", volume.getVolume())); } } @@ -3899,7 +3890,7 @@ public Snapshot allocSnapshot(Long volumeId, Long policyId, String snapshotName, StoragePool storagePool = (StoragePool)volume.getDataStore(); if (storagePool == null) { - throw new InvalidParameterValueException("VolumeId: " + volumeId + " please attach this volume to a VM before create snapshot for it"); + 
throw new InvalidParameterValueException(String.format("Volume: %s please attach this volume to a VM before create snapshot for it", volume.getVolume())); } if (CollectionUtils.isNotEmpty(zoneIds)) { @@ -3949,7 +3940,7 @@ public Snapshot allocSnapshotForVm(Long vmId, Long volumeId, String snapshotName _accountMgr.checkAccess(caller, null, true, volume); VirtualMachine attachVM = volume.getAttachedVM(); if (attachVM == null || attachVM.getId() != vm.getId()) { - throw new InvalidParameterValueException("Creating snapshot failed due to volume:" + volumeId + " doesn't attach to vm :" + vm); + throw new InvalidParameterValueException(String.format("Creating snapshot failed due to volume:%s doesn't attach to vm :%s", volume.getVolume(), vm)); } DataCenter zone = _dcDao.findById(volume.getDataCenterId()); @@ -3962,7 +3953,7 @@ public Snapshot allocSnapshotForVm(Long vmId, Long volumeId, String snapshotName } if (volume.getState() != Volume.State.Ready) { - throw new InvalidParameterValueException("VolumeId: " + volumeId + " is not in " + Volume.State.Ready + " state but " + volume.getState() + ". Cannot take snapshot."); + throw new InvalidParameterValueException(String.format("Volume: %s is not in %s state but %s. 
Cannot take snapshot.", volume.getVolume(), Volume.State.Ready, volume.getState())); } if (volume.getTemplateId() != null) { @@ -3973,13 +3964,13 @@ public Snapshot allocSnapshotForVm(Long vmId, Long volumeId, String snapshotName userVmVO = _userVmDao.findById(instanceId); } if (!isOperationSupported(template, userVmVO)) { - throw new InvalidParameterValueException("VolumeId: " + volumeId + " is for System VM , Creating snapshot against System VM volumes is not supported"); + throw new InvalidParameterValueException(String.format("Volume: %s is for System VM , Creating snapshot against System VM volumes is not supported", volume.getVolume())); } } StoragePool storagePool = (StoragePool)volume.getDataStore(); if (storagePool == null) { - throw new InvalidParameterValueException("VolumeId: " + volumeId + " please attach this volume to a VM before create snapshot for it"); + throw new InvalidParameterValueException(String.format("Volume: %s please attach this volume to a VM before create snapshot for it", volume.getVolume())); } if (storagePool.getPoolType() == Storage.StoragePoolType.PowerFlex) { @@ -4026,7 +4017,7 @@ public String extractVolume(ExtractVolumeCmd cmd) { // Extract activity only for detached volumes or for volumes whose // instance is stopped if (volume.getInstanceId() != null && ApiDBUtils.findVMInstanceById(volume.getInstanceId()).getState() != State.Stopped) { - logger.debug("Invalid state of the volume with ID: " + volumeId + ". It should be either detached or the VM should be in stopped state."); + logger.debug("Invalid state of the volume: {}. It should be either detached or the VM should be in stopped state.", volume); PermissionDeniedException ex = new PermissionDeniedException("Invalid state of the volume with specified ID. 
It should be either detached or the VM should be in stopped state."); ex.addProxyObject(volume.getUuid(), "volumeId"); throw ex; @@ -4199,7 +4190,7 @@ protected void validateVolume(String volumeUuid, VolumeVO volume) { throw new InvalidParameterValueException(String.format("No volume was found with UUID [%s].", volumeUuid)); } - String volumeToString = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(volume, "name", "uuid"); + String volumeToString = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(volume, "id", "name", "uuid"); if (volume.getInstanceId() != null) { VMInstanceVO vmInstanceVo = _vmInstanceDao.findById(volume.getInstanceId()); @@ -4260,8 +4251,7 @@ private Optional setExtractVolumeSearchCriteria(SearchCriteria attachVolumeToVmThroughJobQueue(final Long vmId, final Lo _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); AsyncJobVO jobVo = _jobMgr.getAsyncJob(workJob.getId()); - logger.debug("New job " + workJob.getId() + ", result field: " + jobVo.getResult()); + logger.debug("New job {}, result field: {}", workJob, jobVo.getResult()); AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId()); diff --git a/server/src/main/java/com/cloud/storage/download/DownloadListener.java b/server/src/main/java/com/cloud/storage/download/DownloadListener.java index 3c032306aa20..488e77ede29b 100644 --- a/server/src/main/java/com/cloud/storage/download/DownloadListener.java +++ b/server/src/main/java/com/cloud/storage/download/DownloadListener.java @@ -204,12 +204,12 @@ public void setDisconnected() { public void logDisconnect() { logger.warn("Unable to monitor download progress of {} : uuid: {}({}) at host [id: {}, uuid: {}]", - object.getType(), object.getId(), object, _ssAgent.getId(), _ssAgent.getUuid()); + object.getType(), object.getUuid(), object, _ssAgent.getId(), _ssAgent.getUuid()); } public void log(String message, Level level) { logger.log(level, "{}, {}: {}({}) at host [id: {}, uuid: {}]", - 
message, object.getType(), object.getId(), object, _ssAgent.getId(), _ssAgent.getUuid()); + message, object.getType(), object.getUuid(), object, _ssAgent.getId(), _ssAgent.getUuid()); } public DownloadListener(DownloadMonitorImpl monitor) { diff --git a/server/src/main/java/com/cloud/storage/download/DownloadMonitorImpl.java b/server/src/main/java/com/cloud/storage/download/DownloadMonitorImpl.java index d21257516e29..67d5b091a032 100644 --- a/server/src/main/java/com/cloud/storage/download/DownloadMonitorImpl.java +++ b/server/src/main/java/com/cloud/storage/download/DownloadMonitorImpl.java @@ -171,7 +171,7 @@ private void initiateTemplateDownload(DataObject template, AsyncCompletionCallba try { ep.sendMessageAsync(dcmd, new UploadListener.Callback(ep.getId(), dl)); } catch (Exception e) { - logger.warn("Unable to start /resume download of template " + template.getId() + " to " + store.getName(), e); + logger.warn("Unable to start /resume download of template {} to {}", template, store, e); dl.setDisconnected(); dl.scheduleStatusCheck(RequestType.GET_OR_RESTART); } @@ -228,7 +228,7 @@ private void initiateSnapshotDownload(DataObject snapshot, AsyncCompletionCallba try { ep.sendMessageAsync(dcmd, new UploadListener.Callback(ep.getId(), dl)); } catch (Exception e) { - logger.warn("Unable to start /resume download of snapshot " + snapshot.getId() + " to " + store.getName(), e); + logger.warn("Unable to start /resume download of snapshot {} to {}", snapshot, store, e); dl.setDisconnected(); dl.scheduleStatusCheck(RequestType.GET_OR_RESTART); } @@ -305,7 +305,7 @@ public void downloadVolumeToStorage(DataObject volume, AsyncCompletionCallback, Integer> listSnapshots(ListSnapshotsCmd cm } @Override - public boolean deleteSnapshotDirsForAccount(long accountId) { + public boolean deleteSnapshotDirsForAccount(Account account) { + long accountId = account.getId(); List volumes = _volsDao.findIncludingRemovedByAccount(accountId); // The above call will list only 
non-destroyed volumes. // So call this method before marking the volumes as destroyed. @@ -963,11 +963,11 @@ public boolean deleteSnapshotDirsForAccount(long accountId) { answer = ep.sendMessage(cmd); } if ((answer != null) && answer.getResult()) { - logger.debug("Deleted all snapshots for volume: " + volumeId + " under account: " + accountId); + logger.debug("Deleted all snapshots for volume {} under account {}", volume, account); } else { success = false; if (answer != null) { - logger.warn("Failed to delete all snapshot for volume " + volumeId + " on secondary storage " + ssHost.getUri()); + logger.warn("Failed to delete all snapshot for volume {} on secondary storage {}", volume, ssHost.getUri()); logger.error(answer.getDetails()); } } @@ -978,7 +978,7 @@ public boolean deleteSnapshotDirsForAccount(long accountId) { for (SnapshotVO snapshot : snapshots) { SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.DELETE); if (snapshotStrategy == null) { - logger.error("Unable to find snapshot strategy to handle snapshot with id '" + snapshot.getId() + "'"); + logger.error("Unable to find snapshot strategy to handle snapshot [{}]", snapshot); continue; } List snapshotStoreRefs = _snapshotStoreDao.listReadyBySnapshot(snapshot.getId(), DataStoreRole.Image); @@ -1038,14 +1038,12 @@ public SnapshotPolicyVO createPolicy(CreateSnapshotPolicyCmd cmd, Account policy throw new UnsupportedOperationException(String.format("Encrypted volumes don't support snapshot schedules, cannot create snapshot policy for the volume [%s]", volume.getUuid())); } - String volumeDescription = volume.getVolumeDescription(); - final Account caller = CallContext.current().getCallingAccount(); _accountMgr.checkAccess(caller, null, true, volume); // If display is false we don't actually schedule snapshots. 
if (volume.getState() != Volume.State.Ready && display) { - throw new InvalidParameterValueException("VolumeId: " + volumeId + " is not in " + Volume.State.Ready + " state but " + volume.getState() + ". Cannot take snapshot."); + throw new InvalidParameterValueException(String.format("Volume: %s is not in %s state but %s. Cannot take snapshot.", volume, Volume.State.Ready, volume.getState())); } if (volume.getTemplateId() != null) { @@ -1056,7 +1054,7 @@ public SnapshotPolicyVO createPolicy(CreateSnapshotPolicyCmd cmd, Account policy userVmVO = _vmDao.findById(instanceId); } if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM && (userVmVO == null || !UserVmManager.CKS_NODE.equals(userVmVO.getUserVmType()) || !UserVmManager.SHAREDFSVM.equals(userVmVO.getUserVmType()))) { - throw new InvalidParameterValueException("VolumeId: " + volumeId + " is for System VM , Creating snapshot against System VM volumes is not supported"); + throw new InvalidParameterValueException(String.format("Volume: %s is for System VM , Creating snapshot against System VM volumes is not supported", volume)); } } @@ -1067,7 +1065,7 @@ public SnapshotPolicyVO createPolicy(CreateSnapshotPolicyCmd cmd, Account policy // It is not detached, but attached to a VM if (_vmDao.findById(instanceId) == null) { // It is not a UserVM but a SystemVM or DomR - throw new InvalidParameterValueException(String.format("Failed to create snapshot policy [%s] for volume %s; Snapshots of volumes attached to System or router VM are not allowed.", intervalType, volumeDescription)); + throw new InvalidParameterValueException(String.format("Failed to create snapshot policy [%s] for volume %s; Snapshots of volumes attached to System or router VM are not allowed.", intervalType, volume)); } } @@ -1081,8 +1079,7 @@ public SnapshotPolicyVO createPolicy(CreateSnapshotPolicyCmd cmd, Account policy TimeZone timeZone = TimeZone.getTimeZone(cmdTimezone); String timezoneId = timeZone.getID(); if 
(!timezoneId.equals(cmdTimezone)) { - logger.warn(String.format("Using timezone [%s] for running the snapshot policy [%s] for volume %s, as an equivalent of [%s].", timezoneId, intervalType, volumeDescription, - cmdTimezone)); + logger.warn("Using timezone [{}] for running the snapshot policy [{}] for volume {}, as an equivalent of [{}].", timezoneId, intervalType, volume, cmdTimezone); } String schedule = cmd.getSchedule(); @@ -1091,18 +1088,18 @@ public SnapshotPolicyVO createPolicy(CreateSnapshotPolicyCmd cmd, Account policy DateUtil.getNextRunTime(intvType, schedule, timezoneId, null); } catch (Exception e) { throw new InvalidParameterValueException(String.format("%s has an invalid schedule [%s] for interval type [%s].", - volumeDescription, schedule, intervalType)); + volume, schedule, intervalType)); } int maxSnaps = cmd.getMaxSnaps(); if (maxSnaps <= 0) { - throw new InvalidParameterValueException(String.format("maxSnaps [%s] for volume %s should be greater than 0.", maxSnaps, volumeDescription)); + throw new InvalidParameterValueException(String.format("maxSnaps [%s] for volume %s should be greater than 0.", maxSnaps, volume)); } int intervalMaxSnaps = type.getMax(); if (maxSnaps > intervalMaxSnaps) { - throw new InvalidParameterValueException(String.format("maxSnaps [%s] for volume %s exceeds limit [%s] for interval type [%s].", maxSnaps, volumeDescription, + throw new InvalidParameterValueException(String.format("maxSnaps [%s] for volume %s exceeds limit [%s] for interval type [%s].", maxSnaps, volume, intervalMaxSnaps, intervalType)); } @@ -1131,16 +1128,15 @@ public SnapshotPolicyVO createPolicy(CreateSnapshotPolicyCmd cmd, Account policy protected SnapshotPolicyVO persistSnapshotPolicy(VolumeVO volume, String schedule, String timezone, IntervalType intervalType, int maxSnaps, boolean display, boolean active, Map tags, List zoneIds) { long volumeId = volume.getId(); - String volumeDescription = volume.getVolumeDescription(); GlobalLock 
createSnapshotPolicyLock = GlobalLock.getInternLock("createSnapshotPolicy_" + volumeId); boolean isLockAcquired = createSnapshotPolicyLock.lock(5); if (!isLockAcquired) { - throw new CloudRuntimeException(String.format("Unable to acquire lock for creating snapshot policy [%s] for %s.", intervalType, volumeDescription)); + throw new CloudRuntimeException(String.format("Unable to acquire lock for creating snapshot policy [%s] for %s.", intervalType, volume)); } - logger.debug(String.format("Acquired lock for creating snapshot policy [%s] for volume %s.", intervalType, volumeDescription)); + logger.debug("Acquired lock for creating snapshot policy [{}] for volume {}.", intervalType, volume); try { SnapshotPolicyVO policy = _snapshotPolicyDao.findOneByVolumeInterval(volumeId, intervalType); @@ -1211,8 +1207,8 @@ public void copySnapshotPoliciesBetweenVolumes(VolumeVO srcVolume, VolumeVO dest IntervalType[] intervalTypes = IntervalType.values(); List policies = listPoliciesforVolume(srcVolume.getId()); - logger.debug(String.format("Copying snapshot policies %s from volume %s to volume %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(policies, - "id", "uuid"), srcVolume.getVolumeDescription(), destVolume.getVolumeDescription())); + logger.debug("Copying snapshot policies {} from volume {} to volume {}.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(policies, + "id", "uuid"), srcVolume, destVolume); for (SnapshotPolicyVO policy : policies) { List details = snapshotPolicyDetailsDao.findDetails(policy.getId(), ApiConstants.ZONE_ID); @@ -1410,8 +1406,7 @@ private boolean supportedByHypervisor(VolumeInfo volume, boolean isFromVmSnapsho UserVmVO userVm = _vmDao.findById(volume.getInstanceId()); if (userVm != null) { if (userVm.getState().equals(State.Destroyed) || userVm.getState().equals(State.Expunging)) { - throw new CloudRuntimeException("Creating snapshot failed due to volume:" + volume.getId() + " is associated with vm:" + 
userVm.getInstanceName() + " is in " - + userVm.getState().toString() + " state"); + throw new CloudRuntimeException(String.format("Creating snapshot failed due to volume: %s is associated with vm: %s is in %s state", volume, userVm, userVm.getState().toString())); } if (userVm.getHypervisorType() == HypervisorType.VMware || userVm.getHypervisorType() == HypervisorType.KVM) { @@ -1448,7 +1443,7 @@ public SnapshotInfo takeSnapshot(VolumeInfo volume) throws ResourceAllocationExc SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.TAKE); if (snapshotStrategy == null) { - throw new CloudRuntimeException("Can't find snapshot strategy to deal with snapshot:" + snapshotId); + throw new CloudRuntimeException(String.format("Can't find snapshot strategy to deal with snapshot:%s", snapshot.getSnapshotVO())); } SnapshotInfo snapshotOnPrimary = snapshotStrategy.takeSnapshot(snapshot); @@ -1457,7 +1452,7 @@ public SnapshotInfo takeSnapshot(VolumeInfo volume) throws ResourceAllocationExc if (backupSnapToSecondary) { backupSnapshotToSecondary(payload.getAsyncBackup(), snapshotStrategy, snapshotOnPrimary, payload.getZoneIds()); } else { - logger.debug("skipping backup of snapshot [uuid=" + snapshot.getUuid() + "] to secondary due to configuration"); + logger.debug("skipping backup of snapshot [{}] to secondary due to configuration", snapshot); snapshotOnPrimary.markBackedUp(); } @@ -1469,7 +1464,7 @@ public SnapshotInfo takeSnapshot(VolumeInfo volume) throws ResourceAllocationExc List snapshotStoreRefs = _snapshotStoreDao.listReadyBySnapshot(snapshotId, dataStoreRole); if (CollectionUtils.isEmpty(snapshotStoreRefs)) { - throw new CloudRuntimeException(String.format("Could not find snapshot %s [%s] on [%s]", snapshot.getName(), snapshot.getUuid(), snapshot.getLocationType())); + throw new CloudRuntimeException(String.format("Could not find snapshot %s on [%s]", snapshot.getSnapshotVO(), snapshot.getLocationType())); } 
SnapshotDataStoreVO snapshotStoreRef = snapshotStoreRefs.get(0); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_CREATE, snapshot.getAccountId(), snapshot.getDataCenterId(), snapshotId, snapshot.getName(), null, null, @@ -1540,10 +1535,10 @@ protected void runInContext() { } } catch (final Exception e) { if (attempts >= 0) { - logger.debug("Backing up of snapshot failed, for snapshot with ID " + snapshot.getSnapshotId() + ", left with " + attempts + " more attempts"); + logger.debug("Backing up of snapshot failed, for snapshot {}, left with {} more attempts", snapshot, attempts); backupSnapshotExecutor.schedule(new BackupSnapshotTask(snapshot, --attempts, snapshotStrategy, zoneIds), snapshotBackupRetryInterval, TimeUnit.SECONDS); } else { - logger.debug("Done with " + snapshotBackupRetries + " attempts in backing up of snapshot with ID " + snapshot.getSnapshotId()); + logger.debug("Done with {} attempts in backing up of snapshot {}", snapshotBackupRetries, snapshot.getSnapshotVO()); snapshotSrv.cleanupOnSnapshotBackupFailure(snapshot); } } @@ -1591,10 +1586,10 @@ public boolean start() { for (SnapshotVO snapshotVO : snapshots) { try { if (!deleteSnapshot(snapshotVO.getId(), null)) { - logger.debug("Failed to delete snapshot in destroying state with id " + snapshotVO.getUuid()); + logger.debug("Failed to delete snapshot in destroying state: {}", snapshotVO); } } catch (Exception e) { - logger.debug("Failed to delete snapshot in destroying state with id " + snapshotVO.getUuid()); + logger.debug("Failed to delete snapshot in destroying state: {}", snapshotVO); } } return true; @@ -1677,7 +1672,7 @@ public void cleanupSnapshotsByVolume(Long volumeId) { snapshotSrv.deleteSnapshot(info); } } catch (CloudRuntimeException e) { - String msg = "Cleanup of Snapshot with uuid " + info.getUuid() + " in primary storage is failed. Ignoring"; + String msg = String.format("Cleanup of Snapshot %s in primary storage is failed. 
Ignoring", info); logger.warn(msg); } } @@ -1704,7 +1699,7 @@ public Snapshot allocSnapshot(Long volumeId, Long policyId, String snapshotName, _resourceLimitMgr.checkResourceLimit(owner, ResourceType.secondary_storage, new Long(volume.getSize()).longValue()); } catch (ResourceAllocationException e) { if (snapshotType != Type.MANUAL) { - String msg = "Snapshot resource limit exceeded for account id : " + owner.getId() + ". Failed to create recurring snapshots"; + String msg = String.format("Snapshot resource limit exceeded for account %s. Failed to create recurring snapshots", owner); logger.warn(msg); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_UPDATE_RESOURCE_COUNT, 0L, 0L, msg, "Snapshot resource limit exceeded for account id : " + owner.getId() + ". Failed to create recurring snapshots; please use updateResourceLimit to increase the limit"); @@ -1748,7 +1743,7 @@ public Snapshot allocSnapshot(Long volumeId, Long policyId, String snapshotName, SnapshotVO snapshot = _snapshotDao.persist(snapshotVO); if (snapshot == null) { - throw new CloudRuntimeException("Failed to create snapshot for volume: " + volume.getId()); + throw new CloudRuntimeException(String.format("Failed to create snapshot for volume: %s", volume)); } CallContext.current().putContextParameter(Snapshot.class, snapshot.getUuid()); _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.snapshot); @@ -1775,7 +1770,7 @@ private boolean checkAndProcessSnapshotAlreadyExistInStore(long snapshotId, Data } if (dstSnapshotStore.getState() == ObjectInDataStoreStateMachine.State.Ready) { if (!dstSnapshotStore.isDisplay()) { - logger.debug(String.format("Snapshot ID: %d is in ready state on image store ID: %d, marking it displayable for view", snapshotId, dstSnapshotStore.getDataStoreId())); + logger.debug("Snapshot ID: {} is in ready state on image store: {}, marking it displayable for view", snapshotId, dstSecStore); dstSnapshotStore.setDisplay(true); 
_snapshotStoreDao.update(dstSnapshotStore.getId(), dstSnapshotStore); } @@ -1812,10 +1807,10 @@ private boolean copySnapshotToZone(SnapshotDataStoreVO snapshotDataStoreVO, Data copyUrl = result.getPath(); } } catch (InterruptedException | ExecutionException | ResourceUnavailableException ex) { - logger.error(String.format("Failed to prepare URL for copy for snapshot ID: %d on store: %s", snapshotId, srcSecStore.getName()), ex); + logger.error("Failed to prepare URL for copy for snapshot ID: {} on store: {}", snapshotId, srcSecStore, ex); } if (StringUtils.isEmpty(copyUrl)) { - logger.error(String.format("Unable to prepare URL for copy for snapshot ID: %d on store: %s", snapshotId, srcSecStore.getName())); + logger.error("Unable to prepare URL for copy for snapshot ID: {} on store: {}", snapshotId, srcSecStore); return false; } logger.debug(String.format("Copying snapshot ID: %d to destination zones using download URL: %s", snapshotId, copyUrl)); @@ -1823,7 +1818,7 @@ private boolean copySnapshotToZone(SnapshotDataStoreVO snapshotDataStoreVO, Data AsyncCallFuture future = snapshotSrv.copySnapshot(snapshotOnSecondary, copyUrl, dstSecStore); SnapshotResult result = future.get(); if (result.isFailed()) { - logger.debug(String.format("Copy snapshot ID: %d failed for image store %s: %s", snapshotId, dstSecStore.getName(), result.getResult())); + logger.debug("Copy snapshot ID: {} failed for image store {}: {}", snapshotId, dstSecStore, result.getResult()); return false; } snapshotZoneDao.addSnapshotToZone(snapshotId, dstZoneId); @@ -1835,7 +1830,7 @@ private boolean copySnapshotToZone(SnapshotDataStoreVO snapshotDataStoreVO, Data } return true; } catch (InterruptedException | ExecutionException | ResourceUnavailableException ex) { - logger.debug(String.format("Failed to copy snapshot ID: %d to image store: %s", snapshotId, dstSecStore.getName())); + logger.debug("Failed to copy snapshot ID: {} to image store: {}", snapshotId, dstSecStore); } return false; } @@ -1852,9 
+1847,8 @@ private boolean copySnapshotChainToZone(SnapshotVO snapshotVO, DataStore srcSecS do { dstSecStore = getSnapshotZoneImageStore(currentSnap.getSnapshotId(), destZone.getId()); if (dstSecStore != null) { - logger.debug(String.format("Snapshot ID: %d is already present in secondary storage: %s" + - " in zone %s in ready state, don't need to copy any further", - currentSnap.getSnapshotId(), dstSecStore.getName(), destZone)); + logger.debug("Snapshot {} is already present in secondary storage: {}" + + " in zone {} in ready state, don't need to copy any further", snapshotVO, dstSecStore, destZone); if (snapshotId == currentSnap.getSnapshotId()) { checkAndProcessSnapshotAlreadyExistInStore(snapshotId, dstSecStore); } @@ -1887,11 +1881,11 @@ private boolean copySnapshotChainToZone(SnapshotVO snapshotVO, DataStore srcSecS throw new StorageUnavailableException("Destination zone is not ready, no image store with free capacity", DataCenter.class, destZoneId); } } - logger.debug(String.format("Copying snapshot chain for snapshot ID: %d on secondary store: %s of zone ID: %d", snapshotId, dstSecStore.getName(), destZoneId)); + logger.debug("Copying snapshot chain for snapshot ID: {} on secondary store: {} of zone ID: {}", snapshotVO, dstSecStore, destZone); for (SnapshotDataStoreVO snapshotDataStoreVO : snapshotChain) { if (!copySnapshotToZone(snapshotDataStoreVO, srcSecStore, destZone, dstSecStore, account)) { - logger.error(String.format("Failed to copy snapshot: %s to zone: %s due to failure to copy snapshot ID: %d from snapshot chain", - snapshotVO, destZone, snapshotDataStoreVO.getSnapshotId())); + logger.error("Failed to copy snapshot: {} to zone: {} due to failure to copy snapshot ID: {} from snapshot chain", + snapshotVO, destZone, snapshotDataStoreVO.getSnapshotId()); return false; } } @@ -2001,7 +1995,7 @@ protected void copyNewSnapshotToZones(long snapshotId, long zoneId, List d String completedEventLevel = EventVO.LEVEL_ERROR; String completedEventMsg = 
String.format("Copying snapshot ID: %s failed", snapshotVO.getUuid()); if (dataStore == null) { - logger.error(String.format("Unable to find an image store for zone ID: %d where snapshot %s is in Ready state", zoneId, snapshotVO)); + logger.error("Unable to find an image store for zone: {} where snapshot {} is in Ready state", dataCenterDao.findById(zoneId), snapshotVO); ActionEventUtils.onCompletedActionEvent(CallContext.current().getCallingUserId(), CallContext.current().getCallingAccountId(), completedEventLevel, EventTypes.EVENT_SNAPSHOT_COPY, completedEventMsg, snapshotId, ApiCommandResourceType.Snapshot.toString(), startEventId); diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java index 2a53021636c5..8d4fd0e7aed3 100644 --- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java +++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java @@ -126,7 +126,7 @@ private Date getNextScheduledTime(final long policyId, final Date currentTimesta nextTimestamp = DateUtil.getNextRunTime(type, schedule, timezone, currentTimestamp); final String currentTime = DateUtil.displayDateInTimezone(DateUtil.GMT_TIMEZONE, currentTimestamp); final String nextScheduledTime = DateUtil.displayDateInTimezone(DateUtil.GMT_TIMEZONE, nextTimestamp); - logger.debug("Current time is " + currentTime + ". NextScheduledTime of policyId " + policyId + " is " + nextScheduledTime); + logger.debug("Current time is {}. 
NextScheduledTime of policy {} is {}", currentTime, policy, nextScheduledTime); } return nextTimestamp; } @@ -220,7 +220,7 @@ protected void deleteExpiredVMSnapshots() { long diffInHours = TimeUnit.MILLISECONDS.toHours(now.getTime() - creationTime.getTime()); if (diffInHours >= expiration_interval_hours) { if (logger.isDebugEnabled()){ - logger.debug("Deleting expired VM snapshot id: " + vmSnapshot.getId()); + logger.debug("Deleting expired VM snapshot: {}", vmSnapshot); } _vmSnaphostManager.deleteVMSnapshot(vmSnapshot.getId()); } @@ -240,9 +240,8 @@ protected void scheduleSnapshots() { final long snapshotScheId = snapshotToBeExecuted.getId(); final long policyId = snapshotToBeExecuted.getPolicyId(); final long volumeId = snapshotToBeExecuted.getVolumeId(); + final VolumeVO volume = _volsDao.findByIdIncludingRemoved(snapshotToBeExecuted.getVolumeId()); try { - final VolumeVO volume = _volsDao.findByIdIncludingRemoved(snapshotToBeExecuted.getVolumeId()); - if (!canSnapshotBeScheduled(snapshotToBeExecuted, volume)) { continue; } @@ -252,7 +251,7 @@ protected void scheduleSnapshots() { ActionEventUtils.onScheduledActionEvent(User.UID_SYSTEM, volume.getAccountId(), EventTypes.EVENT_SNAPSHOT_CREATE, "creating snapshot for volume Id:" + volume.getUuid(), volumeId, ApiCommandResourceType.Volume.toString(), true, 0); - logger.trace(String.format("Mapping parameters required to generate a CreateSnapshotCmd for snapshot [%s].", snapshotToBeExecuted.getUuid())); + logger.trace("Mapping parameters required to generate a CreateSnapshotCmd for snapshot [{}].", snapshotToBeExecuted); final Map params = new HashMap(); params.put(ApiConstants.VOLUME_ID, "" + volumeId); params.put(ApiConstants.POLICY_ID, "" + policyId); @@ -269,7 +268,7 @@ protected void scheduleSnapshots() { } } - logger.trace(String.format("Generating a CreateSnapshotCmd for snapshot [%s] with parameters: [%s].", snapshotToBeExecuted.getUuid(), params.toString())); + logger.trace("Generating a CreateSnapshotCmd 
for snapshot [{}] with parameters: [{}].", snapshotToBeExecuted, params.toString()); final CreateSnapshotCmd cmd = new CreateSnapshotCmd(); ComponentContext.inject(cmd); _dispatcher.dispatchCreateCmd(cmd, params); @@ -278,18 +277,18 @@ protected void scheduleSnapshots() { final Date scheduledTimestamp = snapshotToBeExecuted.getScheduledTimestamp(); displayTime = DateUtil.displayDateInTimezone(DateUtil.GMT_TIMEZONE, scheduledTimestamp); - logger.debug(String.format("Scheduling snapshot [%s] for volume [%s] at [%s].", snapshotToBeExecuted.getUuid(), volume.getVolumeDescription(), displayTime)); + logger.debug("Scheduling snapshot [{}] for volume [{}] at [{}].", snapshotToBeExecuted, volume, displayTime); AsyncJobVO job = new AsyncJobVO("", User.UID_SYSTEM, volume.getAccountId(), CreateSnapshotCmd.class.getName(), ApiGsonHelper.getBuilder().create().toJson(params), cmd.getEntityId(), cmd.getApiResourceType() != null ? cmd.getApiResourceType().toString() : null, null); job.setDispatcher(_asyncDispatcher.getName()); final long jobId = _asyncMgr.submitAsyncJob(job); - logger.debug(String.format("Scheduled snapshot [%s] for volume [%s] as job [%s].", snapshotToBeExecuted.getUuid(), volume.getVolumeDescription(), job.getUuid())); + logger.debug("Scheduled snapshot [{}] for volume [{}] as job [{}].", snapshotToBeExecuted, volume, job); tmpSnapshotScheduleVO.setAsyncJobId(jobId); _snapshotScheduleDao.update(snapshotScheId, tmpSnapshotScheduleVO); } catch (final Exception e) { - logger.error(String.format("The scheduling of snapshot [%s] for volume [%s] failed due to [%s].", snapshotToBeExecuted.getUuid(), volumeId, e.toString()), e); + logger.error("The scheduling of snapshot [{}] for volume [{}] failed due to [{}].", snapshotToBeExecuted, volume, e.toString(), e); } finally { if (tmpSnapshotScheduleVO != null) { _snapshotScheduleDao.releaseFromLockTable(snapshotScheId); @@ -307,14 +306,13 @@ protected void scheduleSnapshots() { */ protected boolean 
canSnapshotBeScheduled(final SnapshotScheduleVO snapshotToBeScheduled, final VolumeVO volume) { if (volume.getRemoved() != null) { - logger.warn(String.format("Skipping snapshot [%s] for volume [%s] because it has been removed. Having a snapshot scheduled for a volume that has been " - + "removed is an inconsistency; please, check your database.", snapshotToBeScheduled.getUuid(), volume.getVolumeDescription())); + logger.warn("Skipping snapshot [{}] for volume [{}] because it has been removed. Having a snapshot scheduled for a volume that has been " + + "removed is an inconsistency; please, check your database.", snapshotToBeScheduled, volume); return false; } if (volume.getPoolId() == null) { - logger.debug(String.format("Skipping snapshot [%s] for volume [%s] because it is not attached to any storage pool.", snapshotToBeScheduled.getUuid(), - volume.getVolumeDescription())); + logger.debug("Skipping snapshot [{}] for volume [{}] because it is not attached to any storage pool.", snapshotToBeScheduled, volume); return false; } @@ -323,12 +321,13 @@ protected boolean canSnapshotBeScheduled(final SnapshotScheduleVO snapshotToBeSc } if (_snapshotPolicyDao.findById(snapshotToBeScheduled.getPolicyId()) == null) { - logger.debug(String.format("Snapshot's policy [%s] for volume [%s] has been removed; therefore, this snapshot will be removed from the snapshot scheduler.", - snapshotToBeScheduled.getPolicyId(), volume.getVolumeDescription())); + logger.debug("Snapshot's policy [{}] for volume [{}] has been removed; " + + "therefore, this snapshot will be removed from the snapshot scheduler.", + snapshotToBeScheduled.getPolicyId(), volume); _snapshotScheduleDao.remove(snapshotToBeScheduled.getId()); } - logger.debug(String.format("Snapshot [%s] for volume [%s] can be executed.", snapshotToBeScheduled.getUuid(), volume.getVolumeDescription())); + logger.debug("Snapshot [{}] for volume [{}] can be executed.", snapshotToBeScheduled, volume); return true; } @@ -336,14 +335,13 @@ 
protected boolean isAccountRemovedOrDisabled(final SnapshotScheduleVO snapshotTo Account volAcct = _acctDao.findById(volume.getAccountId()); if (volAcct == null) { - logger.debug(String.format("Skipping snapshot [%s] for volume [%s] because its account [%s] has been removed.", snapshotToBeExecuted.getUuid(), - volume.getVolumeDescription(), volume.getAccountId())); + logger.debug("Skipping snapshot [{}] for volume [{}] because its account [{}] has been removed.", + snapshotToBeExecuted, volume, volume.getAccountId()); return true; } if (volAcct.getState() == Account.State.DISABLED) { - logger.debug(String.format("Skipping snapshot [%s] for volume [%s] because its account [%s] is disabled.", snapshotToBeExecuted.getUuid(), - volume.getVolumeDescription(), volAcct.getUuid())); + logger.debug("Skipping snapshot [{}] for volume [{}] because its account [{}] is disabled.", snapshotToBeExecuted, volume, volAcct); return true; } @@ -385,7 +383,7 @@ public Date scheduleNextSnapshotJob(final SnapshotPolicyVO policy) { } if (_volsDao.findById(policy.getVolumeId()) == null) { - logger.warn("Found snapshot policy ID: " + policyId + " for volume ID: " + policy.getVolumeId() + " that does not exist or has been removed"); + logger.warn("Found snapshot policy: {} for volume ID: {} that does not exist or has been removed", policy, policy.getVolumeId()); removeSchedule(policy.getVolumeId(), policy.getId()); return null; } @@ -440,7 +438,7 @@ public boolean removeSchedule(final Long volumeId, final Long policyId) { success = _snapshotScheduleDao.remove(schedule.getId()); } if (!success) { - logger.debug("Error while deleting Snapshot schedule with Id: " + schedule.getId()); + logger.debug("Error while deleting Snapshot schedule: {}", schedule); } return success; } diff --git a/server/src/main/java/com/cloud/storage/upload/UploadListener.java b/server/src/main/java/com/cloud/storage/upload/UploadListener.java index 9709f5f94774..7c12387d7882 100644 --- 
a/server/src/main/java/com/cloud/storage/upload/UploadListener.java +++ b/server/src/main/java/com/cloud/storage/upload/UploadListener.java @@ -349,7 +349,7 @@ public void setLastUpdated() { } public void log(String message, Level level) { - logger.log(level, message + ", " + type.toString() + " = " + typeName + " at host " + sserver.getName()); + logger.log(level, message + ", " + type.toString() + " = " + typeName + " at host " + sserver); } public void setDisconnected() { @@ -463,7 +463,7 @@ private UploadCommand getCommand() { } public void logDisconnect() { - logger.warn("Unable to monitor upload progress of " + typeName + " at host " + sserver.getName()); + logger.warn("Unable to monitor upload progress of {} at host {}", typeName, sserver); } public void scheduleImmediateStatusCheck(RequestType request) { diff --git a/server/src/main/java/com/cloud/storage/upload/UploadMonitorImpl.java b/server/src/main/java/com/cloud/storage/upload/UploadMonitorImpl.java index 6b503ec3a503..7962d9dced9c 100644 --- a/server/src/main/java/com/cloud/storage/upload/UploadMonitorImpl.java +++ b/server/src/main/java/com/cloud/storage/upload/UploadMonitorImpl.java @@ -162,7 +162,7 @@ public void extractVolume(UploadVO uploadVolumeObj, DataStore secStore, VolumeVO } ep.sendMessageAsync(ucmd, new UploadListener.Callback(ep.getId(), ul)); } catch (Exception e) { - logger.warn("Unable to start upload of volume " + volume.getName() + " from " + secStore.getName() + " to " + url, e); + logger.warn("Unable to start upload of volume {} from {} to {}", volume, secStore, url, e); ul.setDisconnected(); ul.scheduleStatusCheck(RequestType.GET_OR_RESTART); } @@ -199,7 +199,7 @@ public Long extractTemplate(VMTemplateVO template, String url, TemplateDataStore } ep.sendMessageAsync(ucmd, new UploadListener.Callback(ep.getId(), ul)); } catch (Exception e) { - logger.warn("Unable to start upload of " + template.getUniqueName() + " from " + secStore.getName() + " to " + url, e); + logger.warn("Unable 
to start upload of {} from {} to {}", template, secStore, url, e); ul.setDisconnected(); ul.scheduleStatusCheck(RequestType.GET_OR_RESTART); } @@ -262,7 +262,7 @@ else if ((token != null) && (token.length == 5) && (token[2].startsWith(hostname CreateEntityDownloadURLCommand cmd = new CreateEntityDownloadURLCommand(((ImageStoreEntity)store).getMountPoint(), path, uuid, null, null); Answer ans = ep.sendMessage(cmd); if (ans == null || !ans.getResult()) { - errorString = "Unable to create a link for " + type + " id:" + template.getId() + "," + (ans == null ? "" : ans.getDetails()); + errorString = String.format("Unable to create a link for %s [%s]: %s", type, template, ans == null ? "" : ans.getDetails()); logger.error(errorString); throw new CloudRuntimeException(errorString); } @@ -428,7 +428,7 @@ public void handleUploadSync(long sserverId) { logger.warn("Huh? Agent id " + sserverId + " does not correspond to a row in hosts table?"); return; } - logger.debug("Handling upload sserverId " + sserverId); + logger.debug("Handling upload sserver {}", storageHost); List uploadsInProgress = new ArrayList(); uploadsInProgress.addAll(_uploadDao.listByHostAndUploadStatus(sserverId, UploadVO.Status.UPLOAD_IN_PROGRESS)); uploadsInProgress.addAll(_uploadDao.listByHostAndUploadStatus(sserverId, UploadVO.Status.COPY_IN_PROGRESS)); @@ -494,7 +494,7 @@ public void cleanupStorage() { new DeleteEntityDownloadURLCommand(path, extractJob.getType(), extractJob.getUploadUrl(), ((ImageStoreVO)secStore).getParent()); EndPoint ep = _epSelector.select(secStore); if (ep == null) { - logger.warn("UploadMonitor cleanup: There is no secondary storage VM for secondary storage host " + extractJob.getDataStoreId()); + logger.warn("UploadMonitor cleanup: There is no secondary storage VM for secondary storage host {}", secStore); continue; //TODO: why continue? why not break? 
} if (logger.isDebugEnabled()) { diff --git a/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java b/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java index d9c98e2ef920..c06cf5a03830 100644 --- a/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java +++ b/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java @@ -165,12 +165,14 @@ private Pair getAccountDomain(long resourceId, ResourceObjectType re protected void checkTagsDeletePermission(List tagsToDelete, Account caller) { for (ResourceTag resourceTag : tagsToDelete) { + Account owner = _accountMgr.getAccount(resourceTag.getAccountId()); if(logger.isDebugEnabled()) { - logger.debug("Resource Tag Id: " + resourceTag.getResourceId()); - logger.debug("Resource Tag AccountId: " + resourceTag.getAccountId()); + logger.debug("Resource Tag Id: {}", resourceTag.getResourceId()); + logger.debug("Resource Tag Uuid: {}", resourceTag.getResourceUuid()); + logger.debug("Resource Tag Type: {}", resourceTag.getResourceType()); + logger.debug("Resource Tag Account: {}", owner); } if (caller.getAccountId() != resourceTag.getAccountId()) { - Account owner = _accountMgr.getAccount(resourceTag.getAccountId()); if(logger.isDebugEnabled()) { logger.debug("Resource Owner: " + owner); } @@ -203,8 +205,8 @@ public void doInTransactionWithoutResult(TransactionStatus status) { Long domainId = accountDomainPair.second(); Long accountId = accountDomainPair.first(); - resourceManagerUtil.checkResourceAccessible(accountId, domainId, "Account '" + caller + - "' doesn't have permissions to create tags" + " for resource '" + id + "(" + key + ")'."); + resourceManagerUtil.checkResourceAccessible(accountId, domainId, + String.format("Account '%s' doesn't have permissions to create tags for resource [id: %d, uuid: %s] (%s).", caller, id, resourceUuid, key)); String value = tags.get(key); @@ -216,7 +218,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { try { resourceTag 
= _resourceTagDao.persist(resourceTag); } catch (EntityExistsException e) { - throw new CloudRuntimeException(String.format("tag %s already on %s with id %s", resourceTag.getKey(), resourceType.toString(), resourceId),e); + throw new CloudRuntimeException(String.format("tag %s already on %s with id %s", resourceTag.getKey(), resourceType, resourceUuid),e); } resourceTags.add(resourceTag); if (ResourceObjectType.UserVm.equals(resourceType)) { @@ -319,7 +321,7 @@ private void informStoragePoolForVmTags(long vmId, String key, String value) { Long poolId = volume.getPoolId(); DataStore dataStore = retrieveDatastore(poolId); if (dataStore == null || !(dataStore.getDriver() instanceof PrimaryDataStoreDriver)) { - logger.info(String.format("No data store found for VM %d with pool ID %d.", vmId, poolId)); + logger.info("No data store found for volume {} of VM {} with pool ID {}.", volume, vmId, poolId); continue; } PrimaryDataStoreDriver dataStoreDriver = (PrimaryDataStoreDriver) dataStore.getDriver(); diff --git a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java index 026a9350f33f..fbf70a8eaade 100644 --- a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java +++ b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java @@ -182,10 +182,10 @@ private Long performDirectDownloadUrlValidation(final String format, final Hyper Integer connectRequestTimeout = DirectDownloadManager.DirectDownloadConnectionRequestTimeout.value(); Integer connectTimeout = DirectDownloadManager.DirectDownloadConnectTimeout.value(); CheckUrlCommand cmd = new CheckUrlCommand(format, url, connectTimeout, connectRequestTimeout, socketTimeout, followRedirects); - logger.debug("Performing URL " + url + " validation on host " + host.getId()); + logger.debug("Performing URL {} validation on host {}", url, host); Answer answer = _agentMgr.easySend(host.getId(), cmd); if (answer == 
null || !answer.getResult()) { - throw new CloudRuntimeException("URL: " + url + " validation failed on host id " + host.getId()); + throw new CloudRuntimeException(String.format("URL: %s validation failed on host %s", url, host)); } CheckUrlAnswer ans = (CheckUrlAnswer) answer; return ans.getTemplateSize(); @@ -368,17 +368,17 @@ protected boolean isZoneAndImageStoreAvailable(DataStore imageStore, Long zoneId DataCenterVO zone = _dcDao.findById(zoneId); if (zone == null) { - logger.warn(String.format("Unable to find zone by id [%s], so skip downloading template to its image store [%s].", zoneId, imageStore.getId())); + logger.warn("Unable to find zone by id [{}], so skip downloading template to its image store [{}].", zoneId, imageStore); return false; } if (Grouping.AllocationState.Disabled == zone.getAllocationState()) { - logger.info(String.format("Zone [%s] is disabled. Skip downloading template to its image store [%s].", zoneId, imageStore.getId())); + logger.info("Zone [{}] is disabled. Skip downloading template to its image store [{}].", zone, imageStore); return false; } if (!_statsCollector.imageStoreHasEnoughCapacity(imageStore)) { - logger.info(String.format("Image store doesn't have enough capacity. Skip downloading template to this image store [%s].", imageStore.getId())); + logger.info("Image store doesn't have enough capacity. 
Skip downloading template to this image store [{}].", imageStore); return false; } @@ -473,7 +473,7 @@ private void postUploadAllocation(List imageStores, VMTemplateVO temp // update template_store_ref and template state EndPoint ep = _epSelector.select(templateOnStore); if (ep == null) { - String errMsg = "There is no secondary storage VM for downloading template to image store " + imageStore.getName(); + String errMsg = String.format("There is no secondary storage VM for downloading template to image store %s", imageStore); logger.warn(errMsg); throw new CloudRuntimeException(errMsg); } @@ -539,8 +539,7 @@ protected Void createTemplateAsyncCallBack(AsyncCallbackDispatcher dataDiskTemplates = templateDao.listByParentTemplatetId(template.getId()); if (dataDiskTemplates != null && dataDiskTemplates.size() > 0) { - logger.info("Template: " + template.getId() + " has Datadisk template(s) associated with it. Delete Datadisk templates before deleting the template"); + logger.info("Template: {} has Datadisk template(s) associated with it. 
Delete Datadisk templates before deleting the template", template); for (VMTemplateVO dataDiskTemplate : dataDiskTemplates) { - logger.info("Delete Datadisk template: " + dataDiskTemplate.getId() + " from image store: " + imageStore.getName()); + logger.info("Delete Datadisk template: {} from image store: {}", dataDiskTemplate, imageStore); AsyncCallFuture future = imageService.deleteTemplateAsync(imageFactory.getTemplate(dataDiskTemplate.getId(), imageStore)); try { TemplateApiResult result = future.get(); dataDiskDeletetionResult = result.isSuccess(); if (!dataDiskDeletetionResult) { - logger.warn("Failed to delete datadisk template: " + dataDiskTemplate + " from image store: " + imageStore.getName() + " due to: " - + result.getResult()); + logger.warn("Failed to delete datadisk template: {} from image store: {} due to: {}", dataDiskTemplate, imageStore, result.getResult()); break; } // Remove from template_zone_ref @@ -664,13 +662,13 @@ public boolean delete(TemplateProfile profile) { } // remove from template_zone_ref if (dataDiskDeletetionResult) { - logger.info("Delete template: " + template.getId() + " from image store: " + imageStore.getName()); + logger.info("Delete template: {} from image store: {}", template, imageStore); AsyncCallFuture future = imageService.deleteTemplateAsync(imageFactory.getTemplate(template.getId(), imageStore)); try { TemplateApiResult result = future.get(); success = result.isSuccess(); if (!success) { - logger.warn("Failed to delete the template: " + template + " from the image store: " + imageStore.getName() + " due to: " + result.getResult()); + logger.warn("Failed to delete the template: {} from the image store: {} due to: {}", template, imageStore, result.getResult()); break; } @@ -686,8 +684,8 @@ public boolean delete(TemplateProfile profile) { throw new CloudRuntimeException("Delete template Failed", e); } } else { - logger.warn("Template: " + template.getId() + " won't be deleted from image store: " + imageStore.getName() 
+ " because deletion of one of the Datadisk" - + " templates that belonged to the template failed"); + logger.warn("Template: {} won't be deleted from image store: {} " + + "because deletion of one of the Datadisk templates that belonged to the template failed", template, imageStore); } } @@ -701,7 +699,7 @@ public boolean delete(TemplateProfile profile) { // delete all cache entries for this template List cacheTmpls = imageFactory.listTemplateOnCache(template.getId()); for (TemplateInfo tmplOnCache : cacheTmpls) { - logger.info("Delete template: " + tmplOnCache.getId() + " from image cache store: " + tmplOnCache.getDataStore().getName()); + logger.info("Delete template: {} from image cache store: {}", tmplOnCache, tmplOnCache.getDataStore()); tmplOnCache.delete(); } diff --git a/server/src/main/java/com/cloud/template/TemplateAdapterBase.java b/server/src/main/java/com/cloud/template/TemplateAdapterBase.java index 119589dcc65f..d1a491cde665 100644 --- a/server/src/main/java/com/cloud/template/TemplateAdapterBase.java +++ b/server/src/main/java/com/cloud/template/TemplateAdapterBase.java @@ -236,7 +236,7 @@ public TemplateProfile prepare(boolean isIso, long userId, String name, String d } Account caller = CallContext.current().getCallingAccount(); if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getId())) { - throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId); + throw new PermissionDeniedException(String.format("Cannot perform this operation, Zone is currently disabled: %s", zone)); } } } diff --git a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java index 41e0b6f93fff..6073b4f0bb7e 100755 --- a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java +++ b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java @@ -525,8 +525,10 @@ public 
VirtualMachineTemplate prepareTemplate(long templateId, long zoneId, Long if (pool.getStatus() == StoragePoolStatus.Up && pool.getDataCenterId() == zoneId) { prepareTemplateInOneStoragePool(vmTemplate, pool); } else { - logger.warn("Skip loading template " + vmTemplate.getId() + " into primary storage " + pool.getId() + " as either the pool zone " - + pool.getDataCenterId() + " is different from the requested zone " + zoneId + " or the pool is currently not available."); + logger.warn("Skip loading template {} into primary storage {} as " + + "either the pool zone {} is different from the requested zone {} or " + + "the pool is currently not available.", + vmTemplate::toString, pool::toString, () -> _dcDao.findById(pool.getDataCenterId()), () -> _dcDao.findById(zoneId)); } } } else { @@ -568,7 +570,7 @@ private String extract(Account caller, Long templateId, String url, Long zoneId, } if (!_accountMgr.isRootAdmin(caller.getId()) && !template.isExtractable()) { - throw new InvalidParameterValueException("Unable to extract template id=" + templateId + " as it's not extractable"); + throw new InvalidParameterValueException(String.format("Unable to extract template %s as it's not extractable", template)); } _accountMgr.checkAccess(caller, AccessType.OperateEntry, true, template); @@ -602,7 +604,7 @@ private String extract(Account caller, Long templateId, String url, Long zoneId, } // Handle NFS to S3 object store migration case, we trigger template sync from NFS to S3 during extract template or copy template - _tmpltSvr.syncTemplateToRegionStore(templateId, tmpltStore); + _tmpltSvr.syncTemplateToRegionStore(template, tmpltStore); TemplateInfo templateObject = _tmplFactory.getTemplate(templateId, tmpltStore); String extractUrl = tmpltStore.createEntityExtractUrl(templateObject.getInstallPath(), template.getFormat(), templateObject); @@ -657,10 +659,10 @@ public void prepareIsoForVmProfile(VirtualMachineProfile profile, DeployDestinat } private void 
prepareTemplateInOneStoragePool(final VMTemplateVO template, final StoragePoolVO pool) { - logger.info("Schedule to preload template " + template.getId() + " into primary storage " + pool.getId()); + logger.info("Schedule to preload template {} into primary storage {}", template, pool); if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster) { List childDataStores = _poolDao.listChildStoragePoolsInDatastoreCluster(pool.getId()); - logger.debug("Schedule to preload template " + template.getId() + " into child datastores of DataStore cluster: " + pool.getId()); + logger.debug("Schedule to preload template {} into child datastores of DataStore cluster: {}", template, pool); for (StoragePoolVO childDataStore : childDataStores) { prepareTemplateInOneStoragePoolInternal(template, childDataStore); } @@ -681,10 +683,10 @@ protected void runInContext() { } private void reallyRun() { - logger.info("Start to preload template " + template.getId() + " into primary storage " + pool.getId()); + logger.info("Start to preload template {} into primary storage {}", template, pool); StoragePool pol = (StoragePool)_dataStoreMgr.getPrimaryDataStore(pool.getId()); prepareTemplateForCreate(template, pol); - logger.info("End of preloading template " + template.getId() + " into primary storage " + pool.getId()); + logger.info("End of preloading template {} into primary storage {}", template, pool); } }); } @@ -695,8 +697,9 @@ public void prepareTemplateInAllStoragePools(final VMTemplateVO template, long z if (pool.getDataCenterId() == zoneId) { prepareTemplateInOneStoragePool(template, pool); } else { - logger.info("Skip loading template " + template.getId() + " into primary storage " + pool.getId() + " as pool zone " + pool.getDataCenterId() + - " is different from the requested zone " + zoneId); + logger.info("Skip loading template {} into primary storage {} as pool " + + "zone {} is different from the requested zone {}", template::toString, pool::toString, + () -> 
_dcDao.findById(pool.getDataCenterId()), () -> _dcDao.findById(zoneId)); } } } @@ -718,7 +721,7 @@ public VMTemplateStoragePoolVO prepareTemplateForCreate(VMTemplateVO templ, Stor if (templateStoragePoolRef.getDownloadState() == Status.DOWNLOADED) { if (logger.isDebugEnabled()) { - logger.debug("Template " + templateId + " has already been downloaded to pool " + poolId); + logger.debug("Template {} has already been downloaded to pool {}", template, pool); } return templateStoragePoolRef; @@ -733,12 +736,12 @@ public VMTemplateStoragePoolVO prepareTemplateForCreate(VMTemplateVO templ, Stor List vos = _poolHostDao.listByHostStatus(poolId, com.cloud.host.Status.Up); if (vos == null || vos.isEmpty()) { - throw new CloudRuntimeException("Cannot download " + templateId + " to poolId " + poolId + " since there is no host in the Up state connected to this pool"); + throw new CloudRuntimeException(String.format("Cannot download %s to pool %s since there is no host in the Up state connected to this pool", template, pool)); } if (templateStoragePoolRef == null) { if (logger.isDebugEnabled()) { - logger.debug("Downloading template " + templateId + " to pool " + poolId); + logger.debug("Downloading template {} to pool {}", template, pool); } DataStore srcSecStore = _dataStoreMgr.getDataStore(templateStoreRef.getDataStoreId(), DataStoreRole.Image); TemplateInfo srcTemplate = _tmplFactory.getTemplate(templateId, srcSecStore); @@ -753,7 +756,7 @@ public VMTemplateStoragePoolVO prepareTemplateForCreate(VMTemplateVO templ, Stor return _tmpltPoolDao.findByPoolTemplate(poolId, templateId, null); } catch (Exception ex) { - logger.debug("failed to copy template from image store:" + srcSecStore.getName() + " to primary storage"); + logger.debug("failed to copy template from image store {} to primary storage", srcSecStore); } } @@ -844,7 +847,7 @@ public boolean copy(long userId, VMTemplateVO template, DataStore srcSecStore, D try { TemplateApiResult result = future.get(); if 
(result.isFailed()) { - logger.debug("copy template failed for image store " + dstSecStore.getName() + ":" + result.getResult()); + logger.debug("copy template failed for image store {}: {}", dstSecStore, result.getResult()); continue; // try next image store } @@ -859,26 +862,24 @@ public boolean copy(long userId, VMTemplateVO template, DataStore srcSecStore, D List dataDiskTemplates = _tmpltDao.listByParentTemplatetId(template.getId()); if (dataDiskTemplates != null && !dataDiskTemplates.isEmpty()) { for (VMTemplateVO dataDiskTemplate : dataDiskTemplates) { - logger.debug("Copying " + dataDiskTemplates.size() + " for source template " + template.getId() + ". Copy all Datadisk templates to destination datastore " + dstSecStore.getName()); + logger.debug("Copying {} for source template {}. Copy all Datadisk templates to destination datastore {}", dataDiskTemplates.size(), template, dstSecStore); TemplateInfo srcDataDiskTemplate = _tmplFactory.getTemplate(dataDiskTemplate.getId(), srcSecStore); AsyncCallFuture dataDiskCopyFuture = _tmpltSvr.copyTemplate(srcDataDiskTemplate, dstSecStore); try { TemplateApiResult dataDiskCopyResult = dataDiskCopyFuture.get(); if (dataDiskCopyResult.isFailed()) { - logger.error("Copy of datadisk template: " + srcDataDiskTemplate.getId() + " to image store: " + dstSecStore.getName() - + " failed with error: " + dataDiskCopyResult.getResult() + " , will try copying the next one"); + logger.error("Copy of datadisk template: {} to image store: {} failed with error: {} , will try copying the next one", srcDataDiskTemplate, dstSecStore, dataDiskCopyResult.getResult()); continue; // Continue to copy next Datadisk template } _tmpltDao.addTemplateToZone(dataDiskTemplate, dstZoneId); _resourceLimitMgr.incrementResourceCount(dataDiskTemplate.getAccountId(), ResourceType.secondary_storage, dataDiskTemplate.getSize()); } catch (Exception ex) { - logger.error("Failed to copy datadisk template: " + srcDataDiskTemplate.getId() + " to image store: " + 
dstSecStore.getName() - + " , will try copying the next one"); + logger.error("Failed to copy datadisk template: {} to image store: {} , will try copying the next one", srcDataDiskTemplate, dstSecStore); } } } } catch (Exception ex) { - logger.debug("failed to copy template to image store:" + dstSecStore.getName() + " ,will try next one"); + logger.debug("failed to copy template to image store:{} ,will try next one", dstSecStore); } } return true; @@ -902,7 +903,7 @@ public VirtualMachineTemplate copyTemplate(CopyTemplateCmd cmd) throws StorageUn // Verify template is not Datadisk template if (template.getTemplateType().equals(TemplateType.DATADISK)) { - throw new InvalidParameterValueException("Template " + template.getId() + " is of type Datadisk. Cannot copy Datadisk templates."); + throw new InvalidParameterValueException(String.format("Template %s is of type Datadisk. Cannot copy Datadisk templates.", template)); } if (sourceZoneId != null) { @@ -933,7 +934,7 @@ public VirtualMachineTemplate copyTemplate(CopyTemplateCmd cmd) throws StorageUn boolean success = false; if (template.getHypervisorType() == HypervisorType.BareMetal) { if (template.isCrossZones()) { - logger.debug("Template " + templateId + " is cross-zone, don't need to copy"); + logger.debug("Template {} is cross-zone, don't need to copy", template); return template; } for (Long destZoneId: destZoneIds) { @@ -952,18 +953,17 @@ public VirtualMachineTemplate copyTemplate(CopyTemplateCmd cmd) throws StorageUn } if (srcSecStore == null) { - throw new InvalidParameterValueException("There is no template " + templateId + " ready on image store."); + throw new InvalidParameterValueException(String.format("There is no template %s ready on image store.", template)); } if (template.isCrossZones()) { // sync template from cache store to region store if it is not there, for cases where we are going to migrate existing NFS to S3. 
- _tmpltSvr.syncTemplateToRegionStore(templateId, srcSecStore); + _tmpltSvr.syncTemplateToRegionStore(template, srcSecStore); } for (Long destZoneId : destZoneIds) { DataStore dstSecStore = getImageStore(destZoneId, templateId); if (dstSecStore != null) { - logger.debug("There is template " + templateId + " in secondary storage " + dstSecStore.getName() + - " in zone " + destZoneId + " , don't need to copy"); + logger.debug("There is template {} in secondary storage {} in zone {} , don't need to copy", template, dstSecStore, dataCenterVOs.get(destZoneId)); continue; } if (!copy(userId, template, srcSecStore, dataCenterVOs.get(destZoneId))) { @@ -1004,7 +1004,7 @@ private boolean addTemplateToZone(VMTemplateVO template, long dstZoneId, long so _tmpltDao.addTemplateToZone(template, dstZoneId); return true; } catch (Exception ex) { - logger.debug("failed to copy template from Zone: " + sourceZone.getUuid() + " to Zone: " + dstZone.getUuid()); + logger.debug("failed to copy template from Zone: {} to Zone: {}", sourceZone, dstZone); } return false; } @@ -1055,7 +1055,7 @@ public void evictTemplateFromStoragePool(VMTemplateStoragePoolVO templatePoolVO) VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.acquireInLockTable(templatePoolVO.getId()); if (templatePoolRef == null) { - logger.debug("Can't aquire the lock for template pool ref: " + templatePoolVO.getId()); + logger.debug("Can't acquire the lock for template pool ref: {}", templatePoolVO); return; } @@ -1074,11 +1074,11 @@ public void evictTemplateFromStoragePool(VMTemplateStoragePoolVO templatePoolVO) TemplateApiResult result = future.get(); if (result.isFailed()) { - logger.debug("Failed to delete template " + template.getId() + " from storage pool " + pool.getId()); + logger.debug("Failed to delete template {} from storage pool {}", template, pool); } else { // Remove the templatePoolVO. 
if (_tmpltPoolDao.remove(templatePoolVO.getId())) { - logger.debug("Successfully evicted template " + template.getName() + " from storage pool " + pool.getName()); + logger.debug("Successfully evicted template {} from storage pool {}", template, pool); } } } else { @@ -1088,14 +1088,14 @@ public void evictTemplateFromStoragePool(VMTemplateStoragePoolVO templatePoolVO) if (answer != null && answer.getResult()) { // Remove the templatePoolVO. if (_tmpltPoolDao.remove(templatePoolVO.getId())) { - logger.debug("Successfully evicted template " + template.getName() + " from storage pool " + pool.getName()); + logger.debug("Successfully evicted template {} from storage pool {}", template, pool); } } else { - logger.info("Will retry evict template " + template.getName() + " from storage pool " + pool.getName()); + logger.info("Will retry evict template {} from storage pool {}", template, pool); } } } catch (StorageUnavailableException | InterruptedException | ExecutionException e) { - logger.info("Storage is unavailable currently. Will retry evicte template " + template.getName() + " from storage pool " + pool.getName()); + logger.info("Storage is unavailable currently. 
Will retry evicting template {} from storage pool {}", template, pool); } finally { _tmpltPoolDao.releaseFromLockTable(templatePoolRef.getId()); } @@ -1340,7 +1340,7 @@ public boolean deleteTemplate(DeleteTemplateCmd cmd) { vmInstanceVOList = _vmInstanceDao.listNonExpungedByTemplate(templateId); } if(!cmd.isForced() && CollectionUtils.isNotEmpty(vmInstanceVOList)) { - final String message = String.format("Unable to delete template with id: %1$s because VM instances: [%2$s] are using it.", templateId, Joiner.on(",").join(vmInstanceVOList)); + final String message = String.format("Unable to delete template: %s because VM instances: [%s] are using it.", template, Joiner.on(",").join(vmInstanceVOList)); logger.warn(message); throw new InvalidParameterValueException(message); } @@ -1488,7 +1488,7 @@ public boolean updateTemplateOrIsoPermissions(BaseUpdateTemplateOrIsoPermissions } if (!_projectMgr.canAccessProjectAccount(caller, project.getProjectAccountId())) { - throw new InvalidParameterValueException("Account " + caller + " can't access project id=" + projectId); + throw new InvalidParameterValueException("Account " + caller + " can't access project id=" + project.getUuid()); } accountNames.add(_accountMgr.getAccount(project.getProjectAccountId()).getAccountName()); } @@ -1849,7 +1849,7 @@ public VMTemplateVO createPrivateTemplateRecord(CreateTemplateCmd cmd, Account t // will not be active when the private template is // created if (!_volumeMgr.volumeInactive(volume)) { - String msg = "Unable to create private template for volume: " + volume.getName() + "; volume is attached to a non-stopped VM, please stop the VM first"; + String msg = String.format("Unable to create private template for volume: %s; volume is attached to a non-stopped VM, please stop the VM first", volume); if (logger.isInfoEnabled()) { logger.info(msg); } @@ -1858,7 +1858,7 @@ public VMTemplateVO createPrivateTemplateRecord(CreateTemplateCmd cmd, Account t hyperType = 
_volumeDao.getHypervisorType(volumeId); if (HypervisorType.LXC.equals(hyperType)) { - throw new InvalidParameterValueException("Template creation is not supported for LXC volume: " + volumeId); + throw new InvalidParameterValueException(String.format("Template creation is not supported for LXC volume: %s", volume)); } } else { // create template from snapshot snapshot = _snapshotDao.findById(snapshotId); @@ -1877,8 +1877,8 @@ public VMTemplateVO createPrivateTemplateRecord(CreateTemplateCmd cmd, Account t _accountMgr.checkAccess(caller, null, true, snapshot); if (snapshot.getState() != Snapshot.State.BackedUp) { - throw new InvalidParameterValueException("Snapshot id=" + snapshotId + " is not in " + Snapshot.State.BackedUp + - " state yet and can't be used for template creation"); + throw new InvalidParameterValueException(String.format("Snapshot %s is not in %s state yet and can't be used for template creation", + snapshot, Snapshot.State.BackedUp)); } /* @@ -2053,7 +2053,8 @@ public DataStore getImageStore(long tmpltId) { } @Override - public Long getTemplateSize(long templateId, long zoneId) { + public Long getTemplateSize(VirtualMachineTemplate template, long zoneId) { + long templateId = template.getId(); if (_tmplStoreDao.isTemplateMarkedForDirectDownload(templateId)) { // check if template is marked for direct download return _tmplStoreDao.getReadyBypassedTemplate(templateId).getSize(); @@ -2064,7 +2065,7 @@ public Long getTemplateSize(long templateId, long zoneId) { templateStoreRef = _tmplStoreDao.findByTemplateZoneStagingDownloadStatus(templateId, zoneId, VMTemplateStorageResourceAssoc.Status.DOWNLOADED); if (templateStoreRef == null) { - throw new CloudRuntimeException("Template " + templateId + " has not been completely downloaded to zone " + zoneId); + throw new CloudRuntimeException(String.format("Template %s has not been completely downloaded to zone %s", template, _dcDao.findById(zoneId))); } } return templateStoreRef.getSize(); @@ -2331,8 +2332,7 
@@ void validateDetails(VMTemplateVO template, Map details) { return; } if (template.isDeployAsIs()) { - String msg = String.format("Deploy-as-is template %s [%s] can not have the UEFI setting. Settings are read directly from the template", - template.getName(), template.getUuid()); + String msg = String.format("Deploy-as-is template %s can not have the UEFI setting. Settings are read directly from the template", template); throw new InvalidParameterValueException(msg); } try { @@ -2377,10 +2377,11 @@ public void setTemplateAdapters(List adapters) { } @Override - public List getTemplateDisksOnImageStore(Long templateId, DataStoreRole role, String configurationId) { + public List getTemplateDisksOnImageStore(VirtualMachineTemplate template, DataStoreRole role, String configurationId) { + long templateId = template.getId(); TemplateInfo templateObject = _tmplFactory.getTemplate(templateId, role); if (templateObject == null) { - String msg = String.format("Could not find template %s downloaded on store with role %s", templateId, role.toString()); + String msg = String.format("Could not find template %s downloaded on store with role %s", template, role.toString()); logger.error(msg); throw new CloudRuntimeException(msg); } diff --git a/server/src/main/java/com/cloud/usage/UsageServiceImpl.java b/server/src/main/java/com/cloud/usage/UsageServiceImpl.java index 421d2587441d..1e60cade915c 100644 --- a/server/src/main/java/com/cloud/usage/UsageServiceImpl.java +++ b/server/src/main/java/com/cloud/usage/UsageServiceImpl.java @@ -190,7 +190,7 @@ public Pair, Integer> getUsageRecords(ListUsageRecordsCmd //List records for all the accounts if the caller account is of type admin. //If account_id or account_name is explicitly mentioned, list records for the specified account only even if the caller is of type admin ignoreAccountId = _accountService.isRootAdmin(caller.getId()); - logger.debug("Account details not available. 
Using userContext accountId: " + accountId); + logger.debug("Account details not available. Using userContext accountId: {}", caller); } // Check if a domain admin is allowed to access the requested domain id diff --git a/server/src/main/java/com/cloud/user/AccountManagerImpl.java b/server/src/main/java/com/cloud/user/AccountManagerImpl.java index bea799944bef..ef7f99817d4d 100644 --- a/server/src/main/java/com/cloud/user/AccountManagerImpl.java +++ b/server/src/main/java/com/cloud/user/AccountManagerImpl.java @@ -41,6 +41,7 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.host.dao.HostDao; import org.apache.cloudstack.acl.APIChecker; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.InfrastructureEntity; @@ -273,6 +274,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M @Inject private IPAddressDao _ipAddressDao; @Inject + private HostDao hostDao; + @Inject private VpcManager _vpcMgr; @Inject private NetworkModel _networkModel; @@ -758,7 +761,7 @@ public void validateAccountHasAccessToResource(Account account, AccessType acces } else if (InfrastructureEntity.class.isAssignableFrom(resourceClass)) { logger.trace("Validation of access to infrastructure entity has been disabled in CloudStack version 4.4."); } - logger.debug(String.format("Account [%s] has access to resource.", account.getUuid())); + logger.debug("Account [{}] has access to resource.", account); } @Override @@ -805,7 +808,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } }); } catch (Exception e) { - logger.error("Failed to update login attempts for user with id " + id); + logger.error("Failed to update login attempts for user {}", () -> _userAccountDao.findById(id)); } } @@ -837,7 +840,7 @@ protected boolean lockAccount(long accountId) { success = _accountDao.update(Long.valueOf(accountId), acctForUpdate); } else { if (logger.isInfoEnabled()) { - 
logger.info("Attempting to lock a non-enabled account, current state is " + account.getState() + " (accountId: " + accountId + "), locking failed."); + logger.info("Attempting to lock a non-enabled account {}, current state is {}, locking failed.", account, account.getState()); } } } else { @@ -852,7 +855,7 @@ public boolean deleteAccount(AccountVO account, long callerUserId, Account calle // delete the account record if (!_accountDao.remove(accountId)) { - logger.error("Unable to delete account " + accountId); + logger.error("Unable to delete account {}", account); return false; } @@ -860,7 +863,7 @@ public boolean deleteAccount(AccountVO account, long callerUserId, Account calle _accountDao.update(accountId, account); if (logger.isDebugEnabled()) { - logger.debug("Removed account " + accountId); + logger.debug("Removed account {}", account); } return cleanupAccount(account, callerUserId, caller); @@ -881,7 +884,7 @@ protected boolean cleanupAccount(AccountVO account, long callerUserId, Account c } // delete autoscaling VM groups - if (!_autoscaleMgr.deleteAutoScaleVmGroupsByAccount(accountId)) { + if (!_autoscaleMgr.deleteAutoScaleVmGroupsByAccount(account)) { accountCleanupNeeded = true; } @@ -889,7 +892,7 @@ protected boolean cleanupAccount(AccountVO account, long callerUserId, Account c // delete global load balancer rules for the account. 
List gslbRules = _gslbRuleDao.listByAccount(accountId); if (gslbRules != null && !gslbRules.isEmpty()) { - _gslbService.revokeAllGslbRulesForAccount(caller, accountId); + _gslbService.revokeAllGslbRulesForAccount(caller, account); } // delete the account from project accounts @@ -904,15 +907,15 @@ protected boolean cleanupAccount(AccountVO account, long callerUserId, Account c List groups = _vmGroupDao.listByAccountId(accountId); for (InstanceGroupVO group : groups) { if (!_vmMgr.deleteVmGroup(group.getId())) { - logger.error("Unable to delete group: " + group.getId()); + logger.error("Unable to delete group: {}", group); accountCleanupNeeded = true; } } // Delete the snapshots dir for the account. Have to do this before destroying the VMs. - boolean success = _snapMgr.deleteSnapshotDirsForAccount(accountId); + boolean success = _snapMgr.deleteSnapshotDirsForAccount(account); if (success) { - logger.debug("Successfully deleted snapshots directories for all volumes under account " + accountId + " across all zones"); + logger.debug("Successfully deleted snapshots directories for all volumes under account {} across all zones", account); } // clean up templates @@ -923,14 +926,14 @@ protected boolean cleanupAccount(AccountVO account, long callerUserId, Account c try { allTemplatesDeleted = _tmpltMgr.delete(callerUserId, template.getId(), null); } catch (Exception e) { - logger.warn("Failed to delete template while removing account: " + template.getName() + " due to: ", e); + logger.warn("Failed to delete template {} while removing account {} due to: ", template, account, e); allTemplatesDeleted = false; } } } if (!allTemplatesDeleted) { - logger.warn("Failed to delete templates while removing account id=" + accountId); + logger.warn("Failed to delete templates while removing account {}", account); accountCleanupNeeded = true; } @@ -940,14 +943,14 @@ protected boolean cleanupAccount(AccountVO account, long callerUserId, Account c try { 
_vmSnapshotMgr.deleteVMSnapshot(vmSnapshot.getId()); } catch (Exception e) { - logger.debug("Failed to cleanup vm snapshot " + vmSnapshot.getId() + " due to " + e.toString()); + logger.debug("Failed to cleanup vm snapshot {} due to {}", vmSnapshot, e.toString()); } } // Destroy the account's VMs List vms = _userVmDao.listByAccountId(accountId); if (logger.isDebugEnabled()) { - logger.debug("Expunging # of vms (accountId=" + accountId + "): " + vms.size()); + logger.debug("Expunging # of vms (account={}): {}", account, vms.size()); } for (UserVmVO vm : vms) { @@ -956,13 +959,13 @@ protected boolean cleanupAccount(AccountVO account, long callerUserId, Account c _vmMgr.destroyVm(vm.getId(), false); } catch (Exception e) { e.printStackTrace(); - logger.warn("Failed destroying instance " + vm.getUuid() + " as part of account deletion."); + logger.warn("Failed destroying instance {} as part of account deletion.", vm); } } // no need to catch exception at this place as expunging vm // should pass in order to perform further cleanup if (!_vmMgr.expunge(vm)) { - logger.error("Unable to expunge vm: " + vm.getId()); + logger.error("Unable to expunge vm: {}", vm); accountCleanupNeeded = true; } } @@ -973,7 +976,7 @@ protected boolean cleanupAccount(AccountVO account, long callerUserId, Account c try { volumeService.deleteVolume(volume.getId(), caller); } catch (Exception ex) { - logger.warn("Failed to cleanup volumes as a part of account id=" + accountId + " cleanup due to Exception: ", ex); + logger.warn("Failed to cleanup volumes as a part of account {} cleanup due to Exception: ", account, ex); accountCleanupNeeded = true; } } @@ -983,7 +986,7 @@ protected boolean cleanupAccount(AccountVO account, long callerUserId, Account c List vpnUsers = _vpnUser.listByAccount(accountId); for (VpnUserVO vpnUser : vpnUsers) { - _remoteAccessVpnMgr.removeVpnUser(accountId, vpnUser.getUsername(), caller); + _remoteAccessVpnMgr.removeVpnUser(account, vpnUser.getUsername(), caller); } try { 
@@ -991,7 +994,7 @@ protected boolean cleanupAccount(AccountVO account, long callerUserId, Account c _remoteAccessVpnMgr.destroyRemoteAccessVpnForIp(vpn.getServerAddressId(), caller, false); } } catch (ResourceUnavailableException ex) { - logger.warn("Failed to cleanup remote access vpn resources as a part of account id=" + accountId + " cleanup due to Exception: ", ex); + logger.warn("Failed to cleanup remote access vpn resources as a part of account {} cleanup due to Exception: ", account, ex); accountCleanupNeeded = true; } @@ -1003,15 +1006,15 @@ protected boolean cleanupAccount(AccountVO account, long callerUserId, Account c // Cleanup security groups int numRemoved = _securityGroupDao.removeByAccountId(accountId); - logger.info("deleteAccount: Deleted " + numRemoved + " network groups for account " + accountId); + logger.info("deleteAccount: Deleted {} network groups for account {}", numRemoved, account); // Cleanup affinity groups int numAGRemoved = _affinityGroupDao.removeByAccountId(accountId); - logger.info("deleteAccount: Deleted " + numAGRemoved + " affinity groups for account " + accountId); + logger.info("deleteAccount: Deleted {} affinity groups for account {}", numAGRemoved, account); // Delete all the networks boolean networksDeleted = true; - logger.debug("Deleting networks for account " + account.getId()); + logger.debug("Deleting networks for account {}", account); List networks = _networkDao.listByOwner(accountId); if (networks != null) { Collections.sort(networks, new Comparator() { @@ -1031,27 +1034,27 @@ public int compare(NetworkVO network1, NetworkVO network2) { ReservationContext context = new ReservationContextImpl(null, null, getActiveUser(callerUserId), caller); if (!_networkMgr.destroyNetwork(network.getId(), context, false)) { - logger.warn("Unable to destroy network " + network + " as a part of account id=" + accountId + " cleanup."); + logger.warn("Unable to destroy network {} as a part of account {} cleanup.", network, account); 
accountCleanupNeeded = true; networksDeleted = false; } else { - logger.debug("Network " + network.getId() + " successfully deleted as a part of account id=" + accountId + " cleanup."); + logger.debug("Network {} successfully deleted as a part of account {} cleanup.", network, account); } } } // Delete all VPCs boolean vpcsDeleted = true; - logger.debug("Deleting vpcs for account " + account.getId()); + logger.debug("Deleting vpcs for account {}", account); List vpcs = _vpcMgr.getVpcsForAccount(account.getId()); for (Vpc vpc : vpcs) { if (!_vpcMgr.destroyVpc(vpc, caller, callerUserId)) { - logger.warn("Unable to destroy VPC " + vpc + " as a part of account id=" + accountId + " cleanup."); + logger.warn("Unable to destroy VPC {} as a part of account {} cleanup.", vpc, account); accountCleanupNeeded = true; vpcsDeleted = false; } else { - logger.debug("VPC " + vpc.getId() + " successfully deleted as a part of account id=" + accountId + " cleanup."); + logger.debug("VPC {} successfully deleted as a part of account {} cleanup.", vpc, account); } } @@ -1059,35 +1062,35 @@ public int compare(NetworkVO network1, NetworkVO network2) { // release ip addresses belonging to the account List ipsToRelease = _ipAddressDao.listByAccount(accountId); for (IpAddress ip : ipsToRelease) { - logger.debug("Releasing ip " + ip + " as a part of account id=" + accountId + " cleanup"); - if (!_ipAddrMgr.disassociatePublicIpAddress(ip.getId(), callerUserId, caller)) { - logger.warn("Failed to release ip address " + ip + " as a part of account id=" + accountId + " clenaup"); + logger.debug("Releasing ip {} as a part of account {} cleanup", ip, account); + if (!_ipAddrMgr.disassociatePublicIpAddress(ip, callerUserId, caller)) { + logger.warn("Failed to release ip address {} as a part of account {} cleanup", ip, account); accountCleanupNeeded = true; } } } // Delete Site 2 Site VPN customer gateway - logger.debug("Deleting site-to-site VPN customer gateways for account " + accountId); + 
logger.debug("Deleting site-to-site VPN customer gateways for account {}", account); if (!_vpnMgr.deleteCustomerGatewayByAccount(accountId)) { - logger.warn("Fail to delete site-to-site VPN customer gateways for account " + accountId); + logger.warn("Fail to delete site-to-site VPN customer gateways for account {}", account); } // Delete autoscale resources if any try { - _autoscaleMgr.cleanUpAutoScaleResources(accountId); + _autoscaleMgr.cleanUpAutoScaleResources(account); } catch (CloudRuntimeException ex) { - logger.warn("Failed to cleanup AutoScale resources as a part of account id=" + accountId + " cleanup due to exception:", ex); + logger.warn("Failed to cleanup AutoScale resources as a part of account {} cleanup due to exception:", account, ex); accountCleanupNeeded = true; } // release account specific Virtual vlans (belong to system Public Network) - only when networks are cleaned // up successfully if (networksDeleted) { - if (!_configMgr.releaseAccountSpecificVirtualRanges(accountId)) { + if (!_configMgr.releaseAccountSpecificVirtualRanges(account)) { accountCleanupNeeded = true; } else { - logger.debug("Account specific Virtual IP ranges " + " are successfully released as a part of account id=" + accountId + " cleanup."); + logger.debug("Account specific Virtual IP ranges are successfully released as a part of account {} cleanup.", account); } } @@ -1103,14 +1106,14 @@ public int compare(NetworkVO network1, NetworkVO network2) { _dataCenterVnetDao.releaseDedicatedGuestVlans(map.getId()); } int vlansReleased = _accountGuestVlanMapDao.removeByAccountId(accountId); - logger.info("deleteAccount: Released " + vlansReleased + " dedicated guest vlan ranges from account " + accountId); + logger.info("deleteAccount: Released {} dedicated guest vlan ranges from account {}", vlansReleased, account); // release account specific acquired portable IP's. 
Since all the portable IP's must have been already // disassociated with VPC/guest network (due to deletion), so just mark portable IP as free. List ipsToRelease = _ipAddressDao.listByAccount(accountId); for (IpAddress ip : ipsToRelease) { if (ip.isPortable()) { - logger.debug("Releasing portable ip " + ip + " as a part of account id=" + accountId + " cleanup"); + logger.debug("Releasing portable ip {} as a part of account {} cleanup", ip, account); _ipAddrMgr.releasePortableIpAddress(ip.getId()); } } @@ -1118,10 +1121,10 @@ public int compare(NetworkVO network1, NetworkVO network2) { // release dedication if any List dedicatedResources = _dedicatedDao.listByAccountId(accountId); if (dedicatedResources != null && !dedicatedResources.isEmpty()) { - logger.debug("Releasing dedicated resources for account " + accountId); + logger.debug("Releasing dedicated resources for account {}", account); for (DedicatedResourceVO dr : dedicatedResources) { if (!_dedicatedDao.remove(dr.getId())) { - logger.warn("Fail to release dedicated resources for account " + accountId); + logger.warn("Fail to release dedicated resources for account {}", account); } } } @@ -1156,7 +1159,7 @@ public int compare(NetworkVO network1, NetworkVO network2) { accountCleanupNeeded = true; return true; } finally { - logger.info("Cleanup for account " + account.getId() + (accountCleanupNeeded ? " is needed." : " is not needed.")); + logger.info("Cleanup for account {} {}", account, accountCleanupNeeded ? "is needed." 
: "is not needed."); if (accountCleanupNeeded) { _accountDao.markForCleanup(accountId); } else { @@ -1211,11 +1214,11 @@ private boolean doDisableAccount(long accountId) throws ConcurrentOperationExcep try { _itMgr.advanceStop(vm.getUuid(), false); } catch (OperationTimedoutException ote) { - logger.warn("Operation for stopping vm timed out, unable to stop vm " + vm.getHostName(), ote); + logger.warn("Operation for stopping vm timed out, unable to stop vm {}", vm, ote); success = false; } } catch (AgentUnavailableException aue) { - logger.warn("Agent running on host " + vm.getHostId() + " is unavailable, unable to stop vm " + vm.getHostName(), aue); + logger.warn("Agent running on host {} is unavailable, unable to stop vm {}", () -> hostDao.findById(vm.getHostId()), vm::toString, () -> aue); success = false; } } @@ -1277,7 +1280,7 @@ public UserAccount createUserAccount(final String userName, final String passwor checkAccess(getCurrentCallingAccount(), domain); if (!_userAccountDao.validateUsernameInDomain(userName, domainId)) { - throw new InvalidParameterValueException("The user " + userName + " already exists in domain " + domainId); + throw new InvalidParameterValueException(String.format("The user %s already exists in domain %s", userName, domain)); } if (networkDomain != null && networkDomain.length() > 0) { @@ -1383,10 +1386,8 @@ private void checkRoleEscalation(Account caller, Account requested) { } checkApiAccess(apiCheckers, caller, command); } catch (PermissionDeniedException pde) { - String msg = String.format("User of Account %s/%s (%s) can not create an account with access to more privileges they have themself.", - caller.getAccountName(), - caller.getDomainId(), - caller.getUuid()); + String msg = String.format("User of Account %s and domain %s cannot create an account with access to more privileges than they themselves have.", + caller, _domainMgr.getDomain(caller.getDomainId())); logger.warn(msg); throw new PermissionDeniedException(msg,pde); } @@ 
-1441,15 +1442,15 @@ public UserVO createUser(String userName, String password, String firstName, Str Account account = _accountDao.findEnabledAccount(accountName, domainId); if (account == null || account.getType() == Account.Type.PROJECT) { - throw new InvalidParameterValueException("Unable to find account " + accountName + " in domain id=" + domainId + " to create user"); + throw new InvalidParameterValueException(String.format("Unable to find account %s in domain %s to create user", accountName, domain)); } if (account.getId() == Account.ACCOUNT_ID_SYSTEM) { - throw new PermissionDeniedException("Account id : " + account.getId() + " is a system account, can't add a user to it"); + throw new PermissionDeniedException(String.format("Account: %s is a system account, can't add a user to it", account)); } if (!_userAccountDao.validateUsernameInDomain(userName, domainId)) { - throw new CloudRuntimeException("The user " + userName + " already exists in domain " + domainId); + throw new CloudRuntimeException(String.format("The user %s already exists in domain %s", userName, domain)); } UserVO user = null; user = createUser(account.getId(), userName, password, firstName, lastName, email, timeZone, userUUID, source); @@ -1467,7 +1468,7 @@ public UserVO createUser(String userName, String password, String firstName, Str @ActionEvent(eventType = EventTypes.EVENT_USER_UPDATE, eventDescription = "Updating User") public UserAccount updateUser(UpdateUserCmd updateUserCmd) { UserVO user = retrieveAndValidateUser(updateUserCmd); - logger.debug("Updating user with Id: " + user.getUuid()); + logger.debug("Updating user {}", user); validateAndUpdateApiAndSecretKeyIfNeeded(updateUserCmd, user); validateAndUpdateUserApiKeyAccess(updateUserCmd, user); @@ -1509,7 +1510,7 @@ public UserAccount updateUser(UpdateUserCmd updateUserCmd) { */ public void validateUserPasswordAndUpdateIfNeeded(String newPassword, UserVO user, String currentPassword, boolean skipCurrentPassValidation) { if 
(newPassword == null) { - logger.trace("No new password to update for user: " + user.getUuid()); + logger.trace("No new password to update for user: {}", user); return; } if (StringUtils.isBlank(newPassword)) { @@ -1518,7 +1519,7 @@ public void validateUserPasswordAndUpdateIfNeeded(String newPassword, UserVO use User.Source userSource = user.getSource(); if (userSource == User.Source.SAML2 || userSource == User.Source.SAML2DISABLED || userSource == User.Source.LDAP) { - logger.warn(String.format("Unable to update the password for user [%d], as its source is [%s].", user.getId(), user.getSource().toString())); + logger.warn("Unable to update the password for user [{}], as its source is [{}].", user, user.getSource().toString()); throw new InvalidParameterValueException("CloudStack does not support updating passwords for SAML or LDAP users. Please contact your cloud administrator for assistance."); } @@ -1530,7 +1531,7 @@ public void validateUserPasswordAndUpdateIfNeeded(String newPassword, UserVO use boolean isAdmin = isDomainAdmin || isRootAdminExecutingPasswordUpdate; boolean skipValidation = isAdmin || skipCurrentPassValidation; if (isAdmin) { - logger.trace(String.format("Admin account [uuid=%s] executing password update for user [%s] ", callingAccount.getUuid(), user.getUuid())); + logger.trace("Admin account [{}] executing password update for user [{}] ", callingAccount, user); } if (!skipValidation && StringUtils.isBlank(currentPassword)) { throw new InvalidParameterValueException("To set a new password the current password must be provided."); @@ -1560,7 +1561,7 @@ protected void validateCurrentPassword(UserVO user, String currentPassword) { continue; } if (BooleanUtils.toBoolean(authenticationResult.first())) { - logger.debug(String.format("User [id=%s] re-authenticated [authenticator=%s] during password update.", user.getUuid(), userAuthenticator.getName())); + logger.debug("User [{}] re-authenticated [authenticator={}] during password update.", user, 
userAuthenticator.getName()); currentPasswordMatchesDataBasePassword = true; break; } @@ -1594,7 +1595,7 @@ protected void validateAndUpdateUsernameIfNeeded(UpdateUserCmd updateUserCmd, Us Account duplicatedUserAccountWithUserThatHasTheSameUserName = _accountDao.findById(duplicatedUser.getAccountId()); if (duplicatedUserAccountWithUserThatHasTheSameUserName.getDomainId() == account.getDomainId()) { DomainVO domain = _domainDao.findById(duplicatedUserAccountWithUserThatHasTheSameUserName.getDomainId()); - throw new InvalidParameterValueException(String.format("Username [%s] already exists in domain [id=%s,name=%s]", duplicatedUser.getUsername(), domain.getUuid(), domain.getName())); + throw new InvalidParameterValueException(String.format("Username (%s) already exists in domain (%s)", duplicatedUser, domain)); } } user.setUsername(userName); @@ -1651,10 +1652,10 @@ protected Account retrieveAndValidateAccount(UserVO user) { throw new CloudRuntimeException("Unable to find user account with ID: " + user.getAccountId()); } if (account.getType() == Account.Type.PROJECT) { - throw new InvalidParameterValueException("Unable to find user with ID: " + user.getUuid()); + throw new InvalidParameterValueException(String.format("Unable to find user with: %s", user)); } if (account.getId() == Account.ACCOUNT_ID_SYSTEM) { - throw new PermissionDeniedException("user UUID : " + user.getUuid() + " is a system account; update is not allowed."); + throw new PermissionDeniedException(String.format("user: %s is a system account; update is not allowed.", user)); } checkAccess(getCurrentCallingAccount(), AccessType.OperateEntry, true, account); return account; @@ -1761,12 +1762,12 @@ public UserAccount disableUser(long userId) { // don't allow disabling user belonging to project's account if (account.getType() == Account.Type.PROJECT) { - throw new InvalidParameterValueException("Unable to find active user by id " + userId); + throw new InvalidParameterValueException(String.format("Unable 
to find active user %s", user)); } // If the user is a System user, return an error if (account.getId() == Account.ACCOUNT_ID_SYSTEM) { - throw new InvalidParameterValueException("User id : " + userId + " is a system user, disabling is not allowed"); + throw new InvalidParameterValueException(String.format("User: %s is a system user, disabling is not allowed", user)); } checkAccess(caller, AccessType.OperateEntry, true, account); @@ -1779,7 +1780,7 @@ public UserAccount disableUser(long userId) { // user successfully disabled return _userAccountDao.findById(userId); } else { - throw new CloudRuntimeException("Unable to disable user " + userId); + throw new CloudRuntimeException(String.format("Unable to disable user %s", user)); } } @@ -1802,12 +1803,12 @@ public UserAccount enableUser(final long userId) { } if (account.getType() == Account.Type.PROJECT) { - throw new InvalidParameterValueException("Unable to find active user by id " + userId); + throw new InvalidParameterValueException(String.format("Unable to find active user %s", user)); } // If the user is a System user, return an error if (account.getId() == Account.ACCOUNT_ID_SYSTEM) { - throw new InvalidParameterValueException("User id : " + userId + " is a system user, enabling is not allowed"); + throw new InvalidParameterValueException(String.format("User: %s is a system user, enabling is not allowed", user)); } checkAccess(caller, AccessType.OperateEntry, true, account); @@ -1832,7 +1833,7 @@ public Boolean doInTransaction(TransactionStatus status) { return _userAccountDao.findById(userId); } else { - throw new CloudRuntimeException("Unable to enable user " + userId); + throw new CloudRuntimeException(String.format("Unable to enable user %s", user)); } } @@ -1859,7 +1860,7 @@ public UserAccount lockUser(long userId) { // If the user is a System user, return an error. 
We do not allow this if (account.getId() == Account.ACCOUNT_ID_SYSTEM) { - throw new PermissionDeniedException("user id : " + userId + " is a system user, locking is not allowed"); + throw new PermissionDeniedException(String.format("user: %s is a system user, locking is not allowed", user)); } checkAccess(caller, AccessType.OperateEntry, true, account); @@ -1888,7 +1889,7 @@ public UserAccount lockUser(long userId) { } } else { if (logger.isInfoEnabled()) { - logger.info("Attempting to lock a non-enabled user, current state is " + user.getState() + " (userId: " + user.getId() + "), locking failed."); + logger.info("Attempting to lock a non-enabled user {}, current state is {}, locking failed.", user, user.getState()); } success = false; } @@ -1899,7 +1900,7 @@ public UserAccount lockUser(long userId) { return _userAccountDao.findById(userId); } else { - throw new CloudRuntimeException("Unable to lock user " + userId); + throw new CloudRuntimeException(String.format("Unable to lock user %s", user)); } } @@ -1933,7 +1934,7 @@ public boolean deleteUserAccount(long accountId) { projectIds.append(projectId).append(", "); } - throw new InvalidParameterValueException("The account id=" + accountId + " manages project(s) with ids " + projectIds + "and can't be removed"); + throw new InvalidParameterValueException(String.format("The account %s with id %d manages project(s) with ids %s and can't be removed", account, accountId, projectIds)); } CallContext.current().putContextParameter(Account.class, account.getUuid()); @@ -1947,7 +1948,7 @@ protected boolean isDeleteNeeded(AccountVO account, long accountId, Account call return false; } if (account.getRemoved() != null) { - logger.info("The account:" + account.getAccountName() + " is already removed"); + logger.info("The account:{} is already removed", account); return false; } // don't allow removing Project account @@ -1977,11 +1978,11 @@ public AccountVO enableAccount(String accountName, Long domainId, Long accountId } if 
(account == null || account.getType() == Account.Type.PROJECT) { - throw new InvalidParameterValueException("Unable to find account by accountId: " + accountId + " OR by name: " + accountName + " in domain " + domainId); + throw new InvalidParameterValueException(String.format("Unable to find account by accountId: %d OR by name: %s in domain %s", accountId, accountName, _domainMgr.getDomain(domainId))); } if (account.getId() == Account.ACCOUNT_ID_SYSTEM) { - throw new PermissionDeniedException("Account id : " + accountId + " is a system account, enable is not allowed"); + throw new PermissionDeniedException(String.format("Account: %s is a system account, enable is not allowed", account)); } // Check if user performing the action is allowed to modify this account @@ -1995,7 +1996,7 @@ public AccountVO enableAccount(String accountName, Long domainId, Long accountId return _accountDao.findById(account.getId()); } else { - throw new CloudRuntimeException("Unable to enable account by accountId: " + accountId + " OR by name: " + accountName + " in domain " + domainId); + throw new CloudRuntimeException(String.format("Unable to enable account %s by accountId: %d OR by name: %s in domain %s", account, accountId, accountName, _domainMgr.getDomain(domainId))); } } @@ -2016,7 +2017,7 @@ public AccountVO lockAccount(String accountName, Long domainId, Long accountId) } if (account.getId() == Account.ACCOUNT_ID_SYSTEM) { - throw new PermissionDeniedException("Account id : " + accountId + " is a system account, lock is not allowed"); + throw new PermissionDeniedException(String.format("Account: %s is a system account, lock is not allowed", account)); } checkAccess(caller, AccessType.OperateEntry, true, account); @@ -2025,7 +2026,7 @@ public AccountVO lockAccount(String accountName, Long domainId, Long accountId) CallContext.current().putContextParameter(Account.class, account.getUuid()); return _accountDao.findById(account.getId()); } else { - throw new CloudRuntimeException("Unable to lock account by accountId: " +
accountId + " OR by name: " + accountName + " in domain " + domainId); + throw new CloudRuntimeException(String.format("Unable to lock account %s by accountId: %d OR by name: %s in domain %s", account, accountId, accountName, _domainMgr.getDomain(domainId))); } } @@ -2042,11 +2043,11 @@ public AccountVO disableAccount(String accountName, Long domainId, Long accountI } if (account == null || account.getType() == Account.Type.PROJECT) { - throw new InvalidParameterValueException("Unable to find account by accountId: " + accountId + " OR by name: " + accountName + " in domain " + domainId); + throw new InvalidParameterValueException(String.format("Unable to find account by accountId: %d OR by name: %s in domain %s", accountId, accountName, _domainMgr.getDomain(domainId))); } if (account.getId() == Account.ACCOUNT_ID_SYSTEM) { - throw new PermissionDeniedException("Account id : " + accountId + " is a system account, disable is not allowed"); + throw new PermissionDeniedException(String.format("Account: %s is a system account, disable is not allowed", account)); } checkAccess(caller, AccessType.OperateEntry, true, account); @@ -2055,7 +2056,7 @@ public AccountVO disableAccount(String accountName, Long domainId, Long accountI CallContext.current().putContextParameter(Account.class, account.getUuid()); return _accountDao.findById(account.getId()); } else { - throw new CloudRuntimeException("Unable to update account by accountId: " + accountId + " OR by name: " + accountName + " in domain " + domainId); + throw new CloudRuntimeException(String.format("Unable to update account %s by accountId: %d OR by name: %s in domain %s", account, accountId, accountName, _domainMgr.getDomain(domainId))); } } @@ -2083,8 +2084,8 @@ public AccountVO updateAccount(UpdateAccountCmd cmd) { // Check if account exists if (account == null || account.getType() == Account.Type.PROJECT) { - logger.error("Unable to find account by accountId: " + accountId + " OR by name: " + accountName + " in
domain " + domainId); - throw new InvalidParameterValueException("Unable to find account by accountId: " + accountId + " OR by name: " + accountName + " in domain " + domainId); + logger.error("Unable to find account by accountId: {} OR by name: {} in domain {}", accountId, accountName, _domainMgr.getDomain(domainId)); + throw new InvalidParameterValueException(String.format("Unable to find account by accountId: %d OR by name: %s in domain %s", accountId, accountName, _domainMgr.getDomain(domainId))); } // Don't allow to modify system account @@ -2101,16 +2102,17 @@ public AccountVO updateAccount(UpdateAccountCmd cmd) { if(newAccountName != null) { if (newAccountName.isEmpty()) { - throw new InvalidParameterValueException("The new account name for account '" + account.getUuid() + "' " + - "within domain '" + domainId + "' is empty string. Account will be not renamed."); + throw new InvalidParameterValueException(String.format("The new account name for " + + "account '%s' within domain '%s' is empty string. 
Account will be not renamed.", + account, _domainMgr.getDomain(domainId))); } // check if the new proposed account name is absent in the domain Account existingAccount = _accountDao.findActiveAccount(newAccountName, domainId); if (existingAccount != null && existingAccount.getId() != account.getId()) { - throw new InvalidParameterValueException("The account with the proposed name '" + - newAccountName + "' exists in the domain '" + - domainId + "' with existing account id '" + existingAccount.getId() + "'"); + throw new InvalidParameterValueException(String.format("The account with the " + + "proposed name '%s' exists in the domain '%s' with existing account %s", + newAccountName, _domainMgr.getDomain(domainId), existingAccount)); } acctForUpdate.setAccountName(newAccountName); @@ -2130,9 +2132,9 @@ public AccountVO updateAccount(UpdateAccountCmd cmd) { final List roles = cmd.roleService.listRoles(); final boolean roleNotFound = roles.stream().filter(r -> r.getId() == roleId).count() == 0; if (roleNotFound) { - throw new InvalidParameterValueException("Role with ID '" + roleId.toString() + "' " + - "is not found or not available for the account '" + account.getUuid() + "' " + - "in the domain '" + domainId + "'."); + throw new InvalidParameterValueException(String.format("Role with ID '%s' is not " + + "found or not available for the account '%s' in the domain '%s'.", + roleId.toString(), account, _domainMgr.getDomain(domainId))); } Role role = roleService.findRole(roleId); @@ -2165,7 +2167,7 @@ public AccountVO updateAccount(UpdateAccountCmd cmd) { CallContext.current().putContextParameter(Account.class, account.getUuid()); return _accountDao.findById(account.getId()); } else { - throw new CloudRuntimeException("Unable to update account by accountId: " + accountId + " OR by name: " + accountName + " in domain " + domainId); + throw new CloudRuntimeException(String.format("Unable to update account %s by accountId: %d OR by name: %s in domain %d", account, 
accountId, accountName, domainId)); } } @@ -2305,7 +2307,7 @@ protected void runInContext() { List removedAccounts = _accountDao.findCleanupsForRemovedAccounts(null); logger.info("Found " + removedAccounts.size() + " removed accounts to cleanup"); for (AccountVO account : removedAccounts) { - logger.debug("Cleaning up " + account.getId()); + logger.debug("Cleaning up {}", account); cleanupAccount(account, getSystemUser().getId(), getSystemAccount()); } @@ -2313,11 +2315,11 @@ protected void runInContext() { List disabledAccounts = _accountDao.findCleanupsForDisabledAccounts(); logger.info("Found " + disabledAccounts.size() + " disabled accounts to cleanup"); for (AccountVO account : disabledAccounts) { - logger.debug("Disabling account " + account.getId()); + logger.debug("Disabling account {}", account); try { disableAccount(account.getId()); } catch (Exception e) { - logger.error("Skipping due to error on account " + account.getId(), e); + logger.error("Skipping due to error on account {}", account, e); } } @@ -2332,20 +2334,20 @@ protected void runInContext() { // release dedication if any, before deleting the domain List dedicatedResources = _dedicatedDao.listByDomainId(domainId); if (dedicatedResources != null && !dedicatedResources.isEmpty()) { - logger.debug("Releasing dedicated resources for domain" + domainId); + logger.debug("Releasing dedicated resources for domain {}", inactiveDomain); for (DedicatedResourceVO dr : dedicatedResources) { if (!_dedicatedDao.remove(dr.getId())) { - logger.warn("Fail to release dedicated resources for domain " + domainId); + logger.warn("Fail to release dedicated resources for domain {}", inactiveDomain); } } } - logger.debug("Removing inactive domain id=" + domainId); + logger.debug("Removing inactive domain {}", inactiveDomain); _domainMgr.removeDomain(domainId); } else { - logger.debug("Can't remove inactive domain id=" + domainId + " as it has accounts that need cleanup"); + logger.debug("Can't remove inactive domain {} 
as it has accounts that need cleanup", inactiveDomain); } } catch (Exception e) { - logger.error("Skipping due to error on domain " + domainId, e); + logger.error("Skipping due to error on domain {}", inactiveDomain, e); } } @@ -2356,10 +2358,10 @@ protected void runInContext() { try { Account projectAccount = getAccount(project.getProjectAccountId()); if (projectAccount == null) { - logger.debug("Removing inactive project id=" + project.getId()); + logger.debug("Removing inactive project {}", project); _projectMgr.deleteProject(CallContext.current().getCallingAccount(), CallContext.current().getCallingUserId(), project); } else { - logger.debug("Can't remove disabled project " + project + " as it has non removed account id=" + project.getId()); + logger.debug("Can't remove disabled project {} as it has non removed account {}", project, projectAccount); } } catch (Exception e) { logger.error("Skipping due to error on project " + project, e); @@ -2410,7 +2412,7 @@ public Account finalizeOwner(Account caller, String accountName, Long domainId, Account owner = _accountDao.findActiveAccount(accountName, domainId); if (owner == null) { - throw new InvalidParameterValueException("Unable to find account " + accountName + " in domain " + domainId); + throw new InvalidParameterValueException(String.format("Unable to find account %s in domain %s", accountName, domain)); } checkAccess(caller, domain); @@ -2507,8 +2509,8 @@ public AccountVO createAccount(final String accountName, final Account.Type acco } if ((domainId != Domain.ROOT_DOMAIN) && (accountType == Account.Type.ADMIN)) { - throw new InvalidParameterValueException( - "Invalid account type " + accountType + " given for an account in domain " + domainId + "; unable to create user of admin role type in non-ROOT domain."); + throw new InvalidParameterValueException(String.format("Invalid account type %s given for " + + "an account in domain %s; unable to create user of admin role type in non-ROOT domain.", accountType, 
domain)); } // Validate account/user/domain settings @@ -2528,7 +2530,7 @@ public AccountVO createAccount(final String accountName, final Account.Type acco if (accountType == Account.Type.RESOURCE_DOMAIN_ADMIN) { List dc = _dcDao.findZonesByDomainId(domainId); if (dc.isEmpty()) { - throw new InvalidParameterValueException("The account cannot be created as domain " + domain.getName() + " is not associated with any private Zone"); + throw new InvalidParameterValueException(String.format("The account cannot be created as domain %s is not associated with any private Zone", domain)); } } @@ -2539,7 +2541,7 @@ public AccountVO doInTransaction(TransactionStatus status) { AccountVO account = _accountDao.persist(new AccountVO(accountName, domainId, networkDomain, accountType, roleId, uuid)); if (account == null) { - throw new CloudRuntimeException("Failed to create account name " + accountName + " in domain id=" + domainId); + throw new CloudRuntimeException(String.format("Failed to create account name %s in domain id=%s", accountName, _domainMgr.getDomain(domainId))); } Long accountId = account.getId(); @@ -2789,9 +2791,9 @@ private UserAccount getUserAccount(String username, String password, Long domain if (!userAccount.getState().equalsIgnoreCase(Account.State.ENABLED.toString()) || !userAccount.getAccountState().equalsIgnoreCase(Account.State.ENABLED.toString())) { if (logger.isInfoEnabled()) { - logger.info("User " + username + " in domain " + domainName + " is disabled/locked (or account is disabled/locked)"); + logger.info("User {} in domain {} is disabled/locked (or account is disabled/locked)", userAccount, domain); } - throw new CloudAuthenticationException("User " + username + " (or their account) in domain " + domainName + " is disabled/locked. Please contact the administrator."); + throw new CloudAuthenticationException(String.format("User %s (or their account) in domain %s is disabled/locked. 
Please contact the administrator.", userAccount, domain)); } // Whenever the user is able to log in successfully, reset the login attempts to zero if (!isInternalAccount(userAccount.getId())) { @@ -2833,8 +2835,7 @@ protected void updateLoginAttemptsWhenIncorrectLoginAttemptsEnabled(UserAccount (allowedLoginAttempts - attemptsMade) + " attempt(s) remaining"); } else { updateLoginAttempts(account.getId(), allowedLoginAttempts, true); - logger.warn("User " + account.getUsername() + - " has been disabled due to multiple failed login attempts." + " Please contact admin."); + logger.warn("User {} has been disabled due to multiple failed login attempts. Please contact admin.", account); } } @@ -2927,11 +2928,11 @@ public String[] createApiKeyAndSecretKey(RegisterCmd cmd) { // don't allow updating system user if (user.getId() == User.UID_SYSTEM) { - throw new PermissionDeniedException("user id : " + user.getId() + " is system account, update is not allowed"); + throw new PermissionDeniedException(String.format("user: %s is system account, update is not allowed", user)); } // don't allow baremetal system user if (BaremetalUtils.BAREMETAL_SYSTEM_ACCOUNT_NAME.equals(user.getUsername())) { - throw new PermissionDeniedException("user id : " + user.getId() + " is system account, update is not allowed"); + throw new PermissionDeniedException(String.format("user: %s is system account, update is not allowed", user)); } // generate both an api key and a secret key, update the user table with the keys, return the keys to the user @@ -2992,7 +2993,7 @@ private String createUserApiKey(long userId) { _userDao.update(userId, updatedUser); return encodedKey; } catch (NoSuchAlgorithmException ex) { - logger.error("error generating secret key for user id=" + userId, ex); + logger.error("error generating secret key for user {}", _userAccountDao.findById(userId), ex); } return null; } @@ -3019,7 +3020,7 @@ private String createUserSecretKey(long userId) { _userDao.update(userId, 
updatedUser); return encodedKey; } catch (NoSuchAlgorithmException ex) { - logger.error("error generating secret key for user id=" + userId, ex); + logger.error("error generating secret key for user {}", _userAccountDao.findById(userId), ex); } return null; } @@ -3125,7 +3126,7 @@ public void buildACLSearchParameters(Account caller, Long id, String accountName // check permissions permittedAccounts.add(userAccount.getId()); } else { - throw new InvalidParameterValueException("could not find account " + accountName + " in domain " + domain.getUuid()); + throw new InvalidParameterValueException("could not find account " + accountName + " in domain " + domain); } } @@ -3260,7 +3261,7 @@ public Long finalyzeAccountId(final String accountName, final Long domainId, fin if (!enabledOnly || account.getState() == Account.State.ENABLED) { return account.getId(); } else { - throw new PermissionDeniedException("Can't add resources to the account id=" + account.getId() + " in state=" + account.getState() + " as it's no longer active"); + throw new PermissionDeniedException(String.format("Can't add resources to the account %s in state=%s as it's no longer active", account, account.getState())); } } else { // idList is not used anywhere, so removed it now diff --git a/server/src/main/java/com/cloud/user/DomainManagerImpl.java b/server/src/main/java/com/cloud/user/DomainManagerImpl.java index 4a81772d6d75..6fc9c6f5ef53 100644 --- a/server/src/main/java/com/cloud/user/DomainManagerImpl.java +++ b/server/src/main/java/com/cloud/user/DomainManagerImpl.java @@ -362,7 +362,7 @@ public boolean deleteDomain(DomainVO domain, Boolean cleanup) { try { // mark domain as inactive - logger.debug("Marking domain id=" + domain.getId() + " as " + Domain.State.Inactive + " before actually deleting it"); + logger.debug("Marking domain {} as {} before actually deleting it", domain, Domain.State.Inactive); domain.setState(Domain.State.Inactive); _domainDao.update(domain.getId(), domain); @@ -402,12 
+402,12 @@ private boolean cleanDomain(DomainVO domain, Boolean cleanup) { // remove dedicated BGP peers routedIpv4Manager.removeBgpPeersByDomainId(domain.getId()); - if (!_configMgr.releaseDomainSpecificVirtualRanges(domain.getId())) { + if (!_configMgr.releaseDomainSpecificVirtualRanges(domain)) { CloudRuntimeException e = new CloudRuntimeException("Can't delete the domain yet because failed to release domain specific virtual ip ranges"); e.addProxyObject(domain.getUuid(), "domainId"); throw e; } else { - logger.debug("Domain specific Virtual IP ranges " + " are successfully released as a part of domain id=" + domain.getId() + " cleanup."); + logger.debug("Domain specific Virtual IP ranges are successfully released as a part of domain {} cleanup.", domain); } cleanupDomainDetails(domain.getId()); @@ -416,7 +416,7 @@ private boolean cleanDomain(DomainVO domain, Boolean cleanup) { CallContext.current().putContextParameter(Domain.class, domain.getUuid()); return true; } catch (Exception ex) { - logger.error("Exception deleting domain with id " + domain.getId(), ex); + logger.error("Exception deleting domain {}", domain, ex); if (ex instanceof CloudRuntimeException) { rollbackDomainState(domain); throw (CloudRuntimeException)ex; @@ -431,8 +431,7 @@ private boolean cleanDomain(DomainVO domain, Boolean cleanup) { * @param domain domain */ protected void rollbackDomainState(DomainVO domain) { - logger.debug("Changing domain id=" + domain.getId() + " state back to " + Domain.State.Active + - " because it can't be removed due to resources referencing to it"); + logger.debug("Changing domain {} state back to {} because it can't be removed due to resources referencing to it", domain, Domain.State.Active); domain.setState(Domain.State.Active); _domainDao.update(domain.getId(), domain); } @@ -448,8 +447,7 @@ protected void rollbackDomainState(DomainVO domain) { protected void tryCleanupDomain(DomainVO domain, long ownerId) throws ConcurrentOperationException, 
ResourceUnavailableException, CloudRuntimeException { if (!cleanupDomain(domain.getId(), ownerId)) { CloudRuntimeException e = - new CloudRuntimeException("Failed to clean up domain resources and sub domains, delete failed on domain " + domain.getName() + " (id: " + - domain.getId() + ")."); + new CloudRuntimeException(String.format("Failed to clean up domain resources and sub domains, delete failed on domain %s", domain)); e.addProxyObject(domain.getUuid(), "domainId"); throw e; } @@ -472,7 +470,7 @@ protected void removeDomainWithNoAccountsForCleanupNetworksOrDedicatedResources( List accountsForCleanup = _accountDao.findCleanupsForRemovedAccounts(domain.getId()); List dedicatedResources = _dedicatedDao.listByDomainId(domain.getId()); if (CollectionUtils.isNotEmpty(dedicatedResources)) { - logger.error("There are dedicated resources for the domain " + domain.getId()); + logger.error("There are dedicated resources for the domain {}", domain); hasDedicatedResources = true; } if (accountsForCleanup.isEmpty() && networkIds.isEmpty() && !hasDedicatedResources) { @@ -514,8 +512,7 @@ protected void publishRemoveEventsAndRemoveDomain(DomainVO domain) { _messageBus.publish(_name, MESSAGE_PRE_REMOVE_DOMAIN_EVENT, PublishScope.LOCAL, domain); if (!_domainDao.remove(domain.getId())) { CloudRuntimeException e = - new CloudRuntimeException("Delete failed on domain " + domain.getName() + " (id: " + domain.getId() + - "); Please make sure all users and sub domains have been removed from the domain before deleting"); + new CloudRuntimeException(String.format("Delete failed on domain %s; Please make sure all users and sub domains have been removed from the domain before deleting", domain)); e.addProxyObject(domain.getUuid(), "domainId"); throw e; } @@ -604,9 +601,9 @@ private void removeDiskOfferings(Long domainId, String domainIdString) { } protected boolean cleanupDomain(Long domainId, Long ownerId) throws ConcurrentOperationException, ResourceUnavailableException { - 
logger.debug("Cleaning up domain id=" + domainId); boolean success = true; DomainVO domainHandle = _domainDao.findById(domainId); + logger.debug("Cleaning up domain {}", domainHandle); { domainHandle.setState(Domain.State.Inactive); _domainDao.update(domainId, domainHandle); @@ -629,7 +626,7 @@ protected boolean cleanupDomain(Long domainId, Long ownerId) throws ConcurrentOp for (DomainVO domain : domains) { success = (success && cleanupDomain(domain.getId(), domain.getAccountId())); if (!success) { - logger.warn("Failed to cleanup domain id=" + domain.getId()); + logger.warn("Failed to cleanup domain {}", domain); } } } @@ -640,15 +637,15 @@ protected boolean cleanupDomain(Long domainId, Long ownerId) throws ConcurrentOp List accounts = _accountDao.search(sc, null); for (AccountVO account : accounts) { if (account.getType() != Account.Type.PROJECT) { - logger.debug("Deleting account " + account + " as a part of domain id=" + domainId + " cleanup"); + logger.debug("Deleting account {} as a part of domain {} cleanup", account, domainHandle); boolean deleteAccount = _accountMgr.deleteAccount(account, CallContext.current().getCallingUserId(), getCaller()); if (!deleteAccount) { - logger.warn("Failed to cleanup account id=" + account.getId() + " as a part of domain cleanup"); + logger.warn("Failed to cleanup account {} as a part of domain cleanup", account); } success = (success && deleteAccount); } else { ProjectVO project = _projectDao.findByProjectAccountId(account.getId()); - logger.debug("Deleting project " + project + " as a part of domain id=" + domainId + " cleanup"); + logger.debug("Deleting project {} as a part of domain {} cleanup", project, domainHandle); boolean deleteProject = _projectMgr.deleteProject(getCaller(), CallContext.current().getCallingUserId(), project); if (!deleteProject) { logger.warn("Failed to cleanup project " + project + " as a part of domain cleanup"); @@ -659,23 +656,23 @@ protected boolean cleanupDomain(Long domainId, Long ownerId) 
throws ConcurrentOp //delete the domain shared networks boolean networksDeleted = true; - logger.debug("Deleting networks for domain id=" + domainId); + logger.debug("Deleting networks for domain {}", domainHandle); List networkIds = _networkDomainDao.listNetworkIdsByDomain(domainId); CallContext ctx = CallContext.current(); ReservationContext context = new ReservationContextImpl(null, null, _accountMgr.getActiveUser(ctx.getCallingUserId()), ctx.getCallingAccount()); for (Long networkId : networkIds) { - logger.debug("Deleting network id=" + networkId + " as a part of domain id=" + domainId + " cleanup"); + logger.debug("Deleting network id={} as a part of domain {} cleanup", networkId, domainHandle); if (!_networkMgr.destroyNetwork(networkId, context, false)) { - logger.warn("Unable to destroy network id=" + networkId + " as a part of domain id=" + domainId + " cleanup."); + logger.warn("Unable to destroy network id={} as a part of domain {} cleanup.", networkId, domainHandle); networksDeleted = false; } else { - logger.debug("Network " + networkId + " successfully deleted as a part of domain id=" + domainId + " cleanup."); + logger.debug("Network {} successfully deleted as a part of domain {} cleanup.", networkId, domainHandle); } } //don't proceed if networks failed to cleanup. 
The cleanup will be performed for inactive domain once again if (!networksDeleted) { - logger.debug("Failed to delete the shared networks as a part of domain id=" + domainId + " clenaup"); + logger.debug("Failed to delete the shared networks as a part of domain {} cleanup", domainHandle); return false; } @@ -686,10 +683,10 @@ protected boolean cleanupDomain(Long domainId, Long ownerId) throws ConcurrentOp //release dedication if any, before deleting the domain List dedicatedResources = _dedicatedDao.listByDomainId(domainId); if (dedicatedResources != null && !dedicatedResources.isEmpty()) { - logger.debug("Releasing dedicated resources for domain" + domainId); + logger.debug("Releasing dedicated resources for domain {}", domainHandle); for (DedicatedResourceVO dr : dedicatedResources) { if (!_dedicatedDao.remove(dr.getId())) { - logger.warn("Fail to release dedicated resources for domain " + domainId); + logger.warn("Fail to release dedicated resources for domain {}", domainHandle); return false; } } @@ -1005,8 +1002,8 @@ protected void validateNewParentDomainResourceLimit(DomainVO domainToBeMoved, Do if (currentDomainResourceCount + newParentDomainResourceCount > newParentDomainResourceLimit) { String message = String.format("Cannot move domain [%s] to parent domain [%s] as maximum domain resource limit of type [%s] would be exceeded. 
The current resource " - + "count for domain [%s] is [%s], the resource count for the new parent domain [%s] is [%s], and the limit is [%s].", domainToBeMoved.getUuid(), - newParentDomain.getUuid(), resourceType, domainToBeMoved.getUuid(), currentDomainResourceCount, newParentDomain.getUuid(), newParentDomainResourceCount, + + "count for domain [%s] is [%s], the resource count for the new parent domain [%s] is [%s], and the limit is [%s].", domainToBeMoved, + newParentDomain, resourceType, domainToBeMoved, currentDomainResourceCount, newParentDomain, newParentDomainResourceCount, newParentDomainResourceLimit); logger.error(message); throw new ResourceAllocationException(message, resourceType); diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 12e9490a0d4d..aab90c3ecb82 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -747,12 +747,14 @@ private class VmIpAddrFetchThread extends ManagedContextRunnable { long nicId; long vmId; String vmName; + String vmUuid; boolean isWindows; Long hostId; String networkCidr; - public VmIpAddrFetchThread(long vmId, long nicId, String instanceName, boolean windows, Long hostId, String networkCidr) { + public VmIpAddrFetchThread(long vmId, String vmUuid, long nicId, String instanceName, boolean windows, Long hostId, String networkCidr) { this.vmId = vmId; + this.vmUuid = vmUuid; this.nicId = nicId; this.vmName = instanceName; this.isWindows = windows; @@ -765,10 +767,10 @@ protected void runInContext() { GetVmIpAddressCommand cmd = new GetVmIpAddressCommand(vmName, networkCidr, isWindows); boolean decrementCount = true; + NicVO nic = _nicDao.findById(nicId); try { - logger.debug("Trying for vm "+ vmId +" nic Id "+nicId +" ip retrieval ..."); + logger.debug("Trying for vm [id: {}, uuid: {}, name: {}] nic {} ip retrieval ...", vmId, vmUuid, vmName, nic); Answer answer = 
_agentMgr.send(hostId, cmd); - NicVO nic = _nicDao.findById(nicId); if (answer.getResult()) { String vmIp = answer.getDetails(); @@ -777,12 +779,13 @@ protected void runInContext() { if (nic != null) { nic.setIPv4Address(vmIp); _nicDao.update(nicId, nic); - logger.debug("Vm "+ vmId +" IP "+vmIp +" got retrieved successfully"); + logger.debug("Vm [id: {}, uuid: {}, name: {}] IP {} got retrieved successfully", vmId, vmUuid, vmName, vmIp); vmIdCountMap.remove(nicId); decrementCount = false; ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, Domain.ROOT_DOMAIN, EventTypes.EVENT_NETWORK_EXTERNAL_DHCP_VM_IPFETCH, - "VM " + vmId + " nic id " + nicId + " ip address " + vmIp + " got fetched successfully", vmId, ApiCommandResourceType.VirtualMachine.toString()); + String.format("VM [id: %d, uuid: %s, name: %s] nic %s ip address %s got fetched successfully", + vmId, vmUuid, vmName, nic, vmIp), vmId, ApiCommandResourceType.VirtualMachine.toString()); } } } else { @@ -793,7 +796,8 @@ protected void runInContext() { _nicDao.update(nicId, nic); } if (answer.getDetails() != null) { - logger.debug("Failed to get vm ip for Vm "+ vmId + answer.getDetails()); + logger.debug("Failed to get vm ip for Vm [id: {}, uuid: {}, name: {}] {}", + vmId, vmUuid, vmName, answer.getDetails()); } } } catch (OperationTimedoutException e) { @@ -804,7 +808,8 @@ protected void runInContext() { if (decrementCount) { VmAndCountDetails vmAndCount = vmIdCountMap.get(nicId); vmAndCount.decrementCount(); - logger.debug("Ip is not retrieved for VM " + vmId +" nic "+nicId + " ... decremented count to "+vmAndCount.getRetrievalCount()); + logger.debug("Ip is not retrieved for VM [id: {}, uuid: {}, name: {}] nic {} ... 
decremented count to {}", + vmId, vmUuid, vmName, nic, vmAndCount.getRetrievalCount()); vmIdCountMap.put(nicId, vmAndCount); } } @@ -845,13 +850,13 @@ public UserVm resetVMPassword(ResetVMPasswordCmd cmd, String password) throws Re } if (userVm.getState() == State.Error || userVm.getState() == State.Expunging) { - logger.error("vm is not in the right state: " + vmId); - throw new InvalidParameterValueException("Vm with id " + vmId + " is not in the right state"); + logger.error("vm is not in the right state: {}", userVm); + throw new InvalidParameterValueException(String.format("Vm %s is not in the right state", userVm)); } if (userVm.getState() != State.Stopped) { - logger.error("vm is not in the right state: " + vmId); - throw new InvalidParameterValueException("Vm " + userVm + " should be stopped to do password reset"); + logger.error("vm is not in the right state: {}", userVm); + throw new InvalidParameterValueException(String.format("Vm %s should be stopped to do password reset", userVm)); } _accountMgr.checkAccess(caller, null, true, userVm); @@ -951,7 +956,7 @@ public UserVm resetVMUserData(ResetVMUserDataCmd cmd) throws ResourceUnavailable // Do parameters input validation if (userVm.getState() != State.Stopped) { - logger.error("vm is not in the right state: " + vmId); + logger.error("vm ({}) should be stopped to do UserData reset. 
current state: {}", userVm, userVm.getState()); throw new InvalidParameterValueException(String.format("VM %s should be stopped to do UserData reset", userVm)); } @@ -997,11 +1002,11 @@ public UserVm resetVMSSHKey(ResetVMSSHKeyCmd cmd) throws ResourceUnavailableExce // Do parameters input validation if (userVm.getState() == State.Error || userVm.getState() == State.Expunging) { - logger.error("vm is not in the right state: " + vmId); + logger.error("vm ({}) is not in the right state: {}", userVm, userVm.getState()); throw new InvalidParameterValueException("Vm with specified id is not in the right state"); } if (userVm.getState() != State.Stopped) { - logger.error("vm is not in the right state: " + vmId); + logger.error(String.format("vm (%s) is not in the stopped state. current state: %s", userVm, userVm.getState())); throw new InvalidParameterValueException("Vm " + userVm + " should be stopped to do SSH Key reset"); } @@ -1090,10 +1095,10 @@ private boolean resetVMSSHKeyInternal(Long vmId, String sshPublicKeys, String ke @Override public boolean stopVirtualMachine(long userId, long vmId) { boolean status = false; + UserVmVO vm = _vmDao.findById(vmId); if (logger.isDebugEnabled()) { - logger.debug("Stopping vm=" + vmId); + logger.debug("Stopping vm {} with id {}", vm, vmId); } - UserVmVO vm = _vmDao.findById(vmId); if (vm == null || vm.getRemoved() != null) { if (logger.isDebugEnabled()) { logger.debug("VM is either removed or deleted."); @@ -1118,11 +1123,11 @@ private UserVm rebootVirtualMachine(long userId, long vmId, boolean enterSetup, UserVmVO vm = _vmDao.findById(vmId); if (logger.isTraceEnabled()) { - logger.trace(String.format("reboot %s with enterSetup set to %s", vm.getInstanceName(), Boolean.toString(enterSetup))); + logger.trace("reboot {} with enterSetup set to {}", vm, Boolean.toString(enterSetup)); } if (vm == null || vm.getState() == State.Destroyed || vm.getState() == State.Expunging || vm.getRemoved() != null) { - logger.warn("Vm id=" + vmId + " 
doesn't exist"); + logger.warn("Vm {} with id={} doesn't exist or is not in correct state", vm, vmId); return null; } @@ -1134,7 +1139,7 @@ private UserVm rebootVirtualMachine(long userId, long vmId, boolean enterSetup, if (vmOnHost == null || vmOnHost.getResourceState() != ResourceState.Enabled || vmOnHost.getStatus() != Status.Up ) { throw new CloudRuntimeException("Unable to force reboot the VM as the host: " + vm.getHostId() + " is not in the right state"); } - return forceRebootVirtualMachine(vmId, vm.getHostId(), enterSetup); + return forceRebootVirtualMachine(vm, vm.getHostId(), enterSetup); } DataCenterVO dc = _dcDao.findById(vm.getDataCenterId()); @@ -1152,7 +1157,7 @@ private UserVm rebootVirtualMachine(long userId, long vmId, boolean enterSetup, //Safe to start the stopped router serially, this is consistent with the way how multiple networks are added to vm during deploy //and routers are started serially ,may revisit to make this process parallel for(DomainRouterVO routerToStart : routers) { - logger.warn("Trying to start router " + routerToStart.getInstanceName() + " as part of vm: " + vm.getInstanceName() + " reboot"); + logger.warn("Trying to start router {} as part of vm: {} reboot", routerToStart, vm); _virtualNetAppliance.startRouter(routerToStart.getId(),true); } } @@ -1162,7 +1167,7 @@ private UserVm rebootVirtualMachine(long userId, long vmId, boolean enterSetup, throw new CloudRuntimeException("Router start failed due to" + ex); } finally { if (logger.isInfoEnabled()) { - logger.info(String.format("Rebooting vm %s%s.", vm.getInstanceName(), enterSetup? " entering hardware setup menu" : " as is")); + logger.info("Rebooting vm {}{}.", vm, enterSetup ? 
" entering hardware setup menu" : " as is"); } Map params = null; if (enterSetup) { @@ -1176,24 +1181,22 @@ private UserVm rebootVirtualMachine(long userId, long vmId, boolean enterSetup, } return _vmDao.findById(vmId); } else { - logger.error("Vm id=" + vmId + " is not in Running state, failed to reboot"); + logger.error("Vm {} is not in Running state, failed to reboot", vm); return null; } } - private UserVm forceRebootVirtualMachine(long vmId, long hostId, boolean enterSetup) { + private UserVm forceRebootVirtualMachine(UserVmVO vm, long hostId, boolean enterSetup) { try { - if (stopVirtualMachine(vmId, false) != null) { + if (stopVirtualMachine(vm.getId(), false) != null) { Map params = new HashMap<>(); if (enterSetup) { params.put(VirtualMachineProfile.Param.BootIntoSetup, Boolean.TRUE); } - return startVirtualMachine(vmId, null, null, hostId, params, null, false).first(); + return startVirtualMachine(vm.getId(), null, null, hostId, params, null, false).first(); } - } catch (ResourceUnavailableException e) { - throw new CloudRuntimeException("Unable to reboot the VM: " + vmId, e); } catch (CloudException e) { - throw new CloudRuntimeException("Unable to reboot the VM: " + vmId, e); + throw new CloudRuntimeException(String.format("Unable to reboot the VM: %s", vm), e); } return null; } @@ -1396,12 +1399,12 @@ protected ResizeVolumeCmd prepareResizeVolumeCmd(VolumeVO rootVolume, DiskOfferi long currentRootDiskOfferingGiB = currentRootDiskOffering.getDiskSize() / GiB_TO_BYTES; if (newNewOfferingRootSizeInBytes > currentRootDiskOffering.getDiskSize()) { resizeVolumeCmd = new ResizeVolumeCmd(rootVolume.getId(), newRootDiskOffering.getMinIops(), newRootDiskOffering.getMaxIops(), newRootDiskOffering.getId()); - logger.debug(String.format("Preparing command to resize VM Root disk from %d GB to %d GB; current offering: %s, new offering: %s.", currentRootDiskOfferingGiB, - newNewOfferingRootSizeInGiB, currentRootDiskOffering.getName(), newRootDiskOffering.getName())); + 
logger.debug("Preparing command to resize VM Root disk from {} GB to {} GB; current offering: {}, new offering: {}.", + currentRootDiskOfferingGiB, newNewOfferingRootSizeInGiB, currentRootDiskOffering, newRootDiskOffering); } else if (newNewOfferingRootSizeInBytes > 0l && newNewOfferingRootSizeInBytes < currentRootDiskOffering.getDiskSize()) { throw new InvalidParameterValueException(String.format( - "Failed to resize Root volume. The new Service Offering [id: %d, name: %s] has a smaller disk size [%d GB] than the current disk [%d GB].", - newRootDiskOffering.getId(), newRootDiskOffering.getName(), newNewOfferingRootSizeInGiB, currentRootDiskOfferingGiB)); + "Failed to resize Root volume. The new Service Offering [%s] has a smaller disk size [%d GB] than the current disk [%d GB].", + newRootDiskOffering, newNewOfferingRootSizeInGiB, currentRootDiskOfferingGiB)); } return resizeVolumeCmd; } @@ -1443,7 +1446,7 @@ public UserVm addNicToVirtualMachine(AddNicToVMCmd cmd) throws InvalidParameterV checkIfNetExistsForVM(vmInstance, network); - macAddress = validateOrReplaceMacAddress(macAddress, network.getId()); + macAddress = validateOrReplaceMacAddress(macAddress, network); if(_nicDao.findByNetworkIdAndMacAddress(networkId, macAddress) != null) { throw new CloudRuntimeException("A NIC with this MAC address exists for network: " + network.getUuid()); @@ -1462,16 +1465,17 @@ public UserVm addNicToVirtualMachine(AddNicToVMCmd cmd) throws InvalidParameterV // Verify that zone is not Basic DataCenterVO dc = _dcDao.findById(vmInstance.getDataCenterId()); if (dc.getNetworkType() == DataCenter.NetworkType.Basic) { - throw new CloudRuntimeException("Zone " + vmInstance.getDataCenterId() + ", has a NetworkType of Basic. Can't add a new NIC to a VM on a Basic Network"); + throw new CloudRuntimeException(String.format("Zone %s, has a NetworkType of Basic. 
Can't add a new NIC to a VM on a Basic Network", dc)); } //ensure network belongs in zone if (network.getDataCenterId() != vmInstance.getDataCenterId()) { - throw new CloudRuntimeException(vmInstance + " is in zone:" + vmInstance.getDataCenterId() + " but " + network + " is in zone:" + network.getDataCenterId()); + throw new CloudRuntimeException(String.format("%s is in zone: %s but %s is in zone: %s", + vmInstance, dc, network, dataCenterDao.findById(network.getDataCenterId()))); } if(_networkModel.getNicInNetwork(vmInstance.getId(),network.getId()) != null){ - logger.debug("VM " + vmInstance.getHostName() + " already in network " + network.getName() + " going to add another NIC"); + logger.debug("VM {} already in network {} going to add another NIC", vmInstance, network); } else { //* get all vms hostNames in the network List hostNames = _vmInstanceDao.listDistinctHostNames(network.getId()); @@ -1538,12 +1542,12 @@ private void checkIfNetExistsForVM(VirtualMachine virtualMachine, Network networ /** * If the given MAC address is invalid it replaces the given MAC with the next available MAC address */ - protected String validateOrReplaceMacAddress(String macAddress, long networkId) { + protected String validateOrReplaceMacAddress(String macAddress, NetworkVO network) { if (!NetUtils.isValidMac(macAddress)) { try { - macAddress = _networkModel.getNextAvailableMacAddressInNetwork(networkId); + macAddress = _networkModel.getNextAvailableMacAddressInNetwork(network.getId()); } catch (InsufficientAddressCapacityException e) { - throw new CloudRuntimeException(String.format("A MAC address cannot be generated for this NIC in the network [id=%s] ", networkId)); + throw new CloudRuntimeException(String.format("A MAC address cannot be generated for this NIC in the network [%s] ", network)); } } return macAddress; @@ -1592,7 +1596,7 @@ public UserVm removeNicFromVirtualMachine(RemoveNicFromVMCmd cmd) throws Invalid // Verify that zone is not Basic DataCenterVO dc = 
_dcDao.findById(vmInstance.getDataCenterId()); if (dc.getNetworkType() == DataCenter.NetworkType.Basic) { - throw new InvalidParameterValueException("Zone " + vmInstance.getDataCenterId() + ", has a NetworkType of Basic. Can't remove a NIC from a VM on a Basic Network"); + throw new InvalidParameterValueException(String.format("Zone %s, has a NetworkType of Basic. Can't remove a NIC from a VM on a Basic Network", dc)); } // check to see if nic is attached to VM @@ -1660,7 +1664,7 @@ public UserVm updateDefaultNicForVirtualMachine(UpdateDefaultNicForVMCmd cmd) th // Verify that zone is not Basic DataCenterVO dc = _dcDao.findById(vmInstance.getDataCenterId()); if (dc.getNetworkType() == DataCenter.NetworkType.Basic) { - throw new CloudRuntimeException("Zone " + vmInstance.getDataCenterId() + ", has a NetworkType of Basic. Can't change default NIC on a Basic Network"); + throw new CloudRuntimeException(String.format("Zone %s, has a NetworkType of Basic. Can't change default NIC on a Basic Network", dc)); } // no need to check permissions for network, we'll enumerate the ones they already have access to @@ -1752,8 +1756,7 @@ public UserVm updateDefaultNicForVirtualMachine(UpdateDefaultNicForVMCmd cmd) th return _vmDao.findById(vmInstance.getId()); } - throw new CloudRuntimeException("something strange happened, new default network(" + newdefault.getId() + ") is not null, and is not equal to the network(" - + nic.getNetworkId() + ") of the chosen nic"); + throw new CloudRuntimeException(String.format("something strange happened, new default network(%s) is not null, and is not equal to the network(%d) of the chosen nic", newdefault, nic.getNetworkId())); } @Override @@ -1812,10 +1815,10 @@ public UserVm updateNicIpForVirtualMachine(UpdateVmNicIpCmd cmd) { try { ipaddr = _ipAddrMgr.allocateGuestIP(network, ipaddr); } catch (InsufficientAddressCapacityException e) { - throw new InvalidParameterValueException("Allocating ip to guest nic " + nicVO.getUuid() + " failed, for 
insufficient address capacity"); + throw new InvalidParameterValueException(String.format("Allocating ip to guest nic %s failed, for insufficient address capacity", nicVO)); } if (ipaddr == null) { - throw new InvalidParameterValueException("Allocating ip to guest nic " + nicVO.getUuid() + " failed, please choose another ip"); + throw new InvalidParameterValueException(String.format("Allocating ip to guest nic %s failed, please choose another ip", nicVO)); } if (nicVO.getIPv4Address() != null) { @@ -1857,7 +1860,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { }); } } catch (InsufficientAddressCapacityException e) { - logger.error("Allocating ip to guest nic " + nicVO.getUuid() + " failed, for insufficient address capacity"); + logger.error("Allocating ip to guest nic {} failed, for insufficient address capacity", nicVO); return null; } } else { @@ -2167,7 +2170,7 @@ private void changeDiskOfferingForRootVolume(Long vmId, DiskOfferingVO newDiskOf if (currentRootDiskOffering.getId() == newDiskOffering.getId() && (!newDiskOffering.isCustomized() || (newDiskOffering.isCustomized() && Objects.equals(rootVolumeOfVm.getSize(), rootDiskSizeBytes)))) { if (logger.isDebugEnabled()) { - logger.debug(String.format("Volume %s is already having disk offering %s", rootVolumeOfVm, newDiskOffering.getUuid())); + logger.debug("Volume {} is already having disk offering {}", rootVolumeOfVm, newDiskOffering); } continue; } @@ -2286,20 +2289,20 @@ public UserVm recoverVirtualMachine(RecoverVMCmd cmd) throws ResourceAllocationE if (vm.getRemoved() != null) { if (logger.isDebugEnabled()) { - logger.debug("Unable to find vm or vm is removed: " + vmId); + logger.debug("Unable to find vm. 
vm is removed: {}", vm); } - throw new InvalidParameterValueException("Unable to find vm by id " + vmId); + throw new InvalidParameterValueException("Unable to find vm by id " + vm.getUuid()); } if (vm.getState() != State.Destroyed) { if (logger.isDebugEnabled()) { - logger.debug("vm is not in the right state: " + vmId); + logger.debug("vm {} is not in the Destroyed state. current sate: {}", vm, vm.getState()); } - throw new InvalidParameterValueException("Vm with id " + vmId + " is not in the right state"); + throw new InvalidParameterValueException("Vm with id " + vm.getUuid() + " is not in the right state"); } if (logger.isDebugEnabled()) { - logger.debug("Recovering vm " + vmId); + logger.debug("Recovering vm {}", vm); } Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { @@ -2326,11 +2329,11 @@ public UserVm recoverVirtualMachine(RecoverVMCmd cmd) throws ResourceAllocationE try { if (!_itMgr.stateTransitTo(vm, VirtualMachine.Event.RecoveryRequested, null)) { - logger.debug("Unable to recover the vm because it is not in the correct state: " + vmId); - throw new InvalidParameterValueException("Unable to recover the vm because it is not in the correct state: " + vmId); + logger.debug("Unable to recover the vm {} because it is not in the correct state. current state: {}", vm, vm.getState()); + throw new InvalidParameterValueException(String.format("Unable to recover the vm %s because it is not in the correct state. current state: %s", vm, vm.getState())); } } catch (NoTransitionException e) { - throw new InvalidParameterValueException("Unable to recover the vm because it is not in the correct state: " + vmId); + throw new InvalidParameterValueException(String.format("Unable to recover the vm %s because it is not in the correct state. 
current state: %s", vm, vm.getState())); } // Recover the VM's disks @@ -2500,7 +2503,7 @@ public boolean expunge(UserVmVO vm) { // Cleanup vm resources - all the PF/LB/StaticNat rules // associated with vm logger.debug("Starting cleaning up vm " + vm + " resources..."); - if (cleanupVmResources(vm.getId())) { + if (cleanupVmResources(vm)) { logger.debug("Successfully cleaned up vm " + vm + " resources as a part of expunge process"); } else { logger.warn("Failed to cleanup resources as a part of vm " + vm + " expunge"); @@ -2548,20 +2551,21 @@ private void releaseNetworkResourcesOnExpunge(long id) throws ConcurrentOperatio } } - private boolean cleanupVmResources(long vmId) { + private boolean cleanupVmResources(UserVmVO vm) { + long vmId = vm.getId(); boolean success = true; // Remove vm from security groups - _securityGroupMgr.removeInstanceFromGroups(vmId); + _securityGroupMgr.removeInstanceFromGroups(vm); // Remove vm from instance group removeInstanceFromInstanceGroup(vmId); // cleanup firewall rules if (_firewallMgr.revokeFirewallRulesForVm(vmId)) { - logger.debug("Firewall rules are removed successfully as a part of vm id=" + vmId + " expunge"); + logger.debug("Firewall rules are removed successfully as a part of vm {} expunge", vm); } else { success = false; - logger.warn("Fail to remove firewall rules as a part of vm id=" + vmId + " expunge"); + logger.warn("Fail to remove firewall rules as a part of vm {} expunge", vm); } // cleanup port forwarding rules @@ -2569,19 +2573,19 @@ private boolean cleanupVmResources(long vmId) { NsxProviderVO nsx = nsxProviderDao.findByZoneId(vmInstanceVO.getDataCenterId()); if (Objects.isNull(nsx) || Objects.isNull(kubernetesServiceHelpers.get(0).findByVmId(vmId))) { if (_rulesMgr.revokePortForwardingRulesForVm(vmId)) { - logger.debug("Port forwarding rules are removed successfully as a part of vm id=" + vmId + " expunge"); + logger.debug("Port forwarding rules are removed successfully as a part of vm {} expunge", vm); } 
else { success = false; - logger.warn("Fail to remove port forwarding rules as a part of vm id=" + vmId + " expunge"); + logger.warn("Fail to remove port forwarding rules as a part of vm {} expunge", vm); } } // cleanup load balancer rules if (_lbMgr.removeVmFromLoadBalancers(vmId)) { - logger.debug("Removed vm id=" + vmId + " from all load balancers as a part of expunge process"); + logger.debug("Removed vm {} from all load balancers as a part of expunge process", vm); } else { success = false; - logger.warn("Fail to remove vm id=" + vmId + " from load balancers as a part of expunge process"); + logger.warn("Fail to remove vm {} from load balancers as a part of expunge process", vm); } // If vm is assigned to static nat, disable static nat for the ip @@ -2591,14 +2595,14 @@ private boolean cleanupVmResources(long vmId) { for (IPAddressVO ip : ips) { try { if (_rulesMgr.disableStaticNat(ip.getId(), _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM), User.UID_SYSTEM, true)) { - logger.debug("Disabled 1-1 nat for ip address " + ip + " as a part of vm id=" + vmId + " expunge"); + logger.debug("Disabled 1-1 nat for ip address {} as a part of vm {} expunge", ip, vm); } else { - logger.warn("Failed to disable static nat for ip address " + ip + " as a part of vm id=" + vmId + " expunge"); + logger.warn("Failed to disable static nat for ip address {} as a part of vm {} expunge", ip, vm); success = false; } } catch (ResourceUnavailableException e) { success = false; - logger.warn("Failed to disable static nat for ip address " + ip + " as a part of vm id=" + vmId + " expunge because resource is unavailable", e); + logger.warn("Failed to disable static nat for ip address {} as a part of vm {} expunge because resource is unavailable", ip, vm, e); } } @@ -2633,7 +2637,7 @@ private void updateVmStateForFailedVmCreation(Long vmId, Long hostId) { volumeMgr.destroyVolume(volume); } } - String msg = "Failed to deploy Vm with Id: " + vmId + ", on Host with Id: " + hostId; + String msg 
= String.format("Failed to deploy Vm %s, on Host with Id: %d", vm, hostId); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); // Get serviceOffering and template for Virtual Machine @@ -2683,7 +2687,7 @@ protected void runInContext() { VirtualMachine vm = vmProfile.getVirtualMachine(); boolean isWindows = _guestOSCategoryDao.findById(_guestOSDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows"); - _vmIpFetchThreadExecutor.execute(new VmIpAddrFetchThread(vmId, nicId, vmInstance.getInstanceName(), + _vmIpFetchThreadExecutor.execute(new VmIpAddrFetchThread(vmId, vmInstance.getUuid(), nicId, vmInstance.getInstanceName(), isWindows, vm.getHostId(), network.getCidr())); } @@ -3031,8 +3035,8 @@ public UserVm updateVirtualMachine(long id, String displayName, String group, Bo } if (vm.getState() == State.Error || vm.getState() == State.Expunging) { - logger.error("vm is not in the right state: " + id); - throw new InvalidParameterValueException("Vm with id " + id + " is not in the right state"); + logger.error("vm {} is not in the correct state. 
current state: {}", vm, vm.getState()); + throw new InvalidParameterValueException(String.format("Vm %s is not in the right state", vm)); } if (displayName == null) { @@ -3095,7 +3099,7 @@ public UserVm updateVirtualMachine(long id, String displayName, String group, Bo throw new InvalidParameterValueException("Dynamic Scaling cannot be enabled for the VM since its service offering does not have dynamic scaling enabled"); } if (!UserVmManager.EnableDynamicallyScaleVm.valueIn(vm.getDataCenterId())) { - logger.debug(String.format("Dynamic Scaling cannot be enabled for the VM %s since the global setting enable.dynamic.scale.vm is set to false", vm.getUuid())); + logger.debug("Dynamic Scaling cannot be enabled for the VM {} since the global setting enable.dynamic.scale.vm is set to false", vm); throw new InvalidParameterValueException("Dynamic Scaling cannot be enabled for the VM since corresponding global setting is set to false"); } } @@ -3129,9 +3133,9 @@ public UserVm updateVirtualMachine(long id, String displayName, String group, Bo if (securityGroupIdList != null && _networkModel.isSecurityGroupSupportedInNetwork(defaultNetwork) && _networkModel.canAddDefaultSecurityGroup()) { if (vm.getState() == State.Stopped) { // Remove instance from security groups - _securityGroupMgr.removeInstanceFromGroups(id); + _securityGroupMgr.removeInstanceFromGroups(vm); // Add instance in provided groups - _securityGroupMgr.addInstanceToGroups(id, securityGroupIdList); + _securityGroupMgr.addInstanceToGroups(vm, securityGroupIdList); } else { throw new InvalidParameterValueException("Virtual machine must be stopped prior to update security groups "); } @@ -3187,7 +3191,7 @@ public UserVm updateVirtualMachine(long id, String displayName, String group, Bo protected void updateUserData(UserVm vm) throws ResourceUnavailableException, InsufficientCapacityException { boolean result = updateUserDataInternal(vm); if (result) { - logger.debug(String.format("User data successfully updated for 
vm id: %s", vm.getId())); + logger.debug("User data successfully updated for vm id: {}", vm); } else { throw new CloudRuntimeException("Failed to reset userdata for the virtual machine "); } @@ -3202,7 +3206,7 @@ private void updateDns(UserVmVO vm, String hostName) throws ResourceUnavailableE List routers = _routerDao.findByNetwork(nic.getNetworkId()); for (DomainRouterVO router : routers) { if (router.getState() != State.Running) { - logger.warn(String.format("Unable to update DNS for VM %s, as virtual router: %s is not in the right state: %s ", vm, router.getName(), router.getState())); + logger.warn("Unable to update DNS for VM {}, as virtual router: {} is not in the right state: {} ", vm, router, router.getState()); continue; } Commands commands = new Commands(Command.OnError.Stop); @@ -3228,7 +3232,7 @@ private boolean updateUserDataInternal(UserVm vm) throws ResourceUnavailableExce List nics = _nicDao.listByVmId(vm.getId()); if (nics == null || nics.isEmpty()) { - logger.error("unable to find any nics for vm " + vm.getUuid()); + logger.error("unable to find any nics for vm {}", vm); return false; } @@ -3257,7 +3261,7 @@ protected boolean applyUserData(HypervisorType hyperVisorType, UserVm vm, Nic ni return true; } } else { - logger.debug("Not applying userdata for nic id=" + nic.getId() + " in vm id=" + vmProfile.getId() + " because it is not supported in network id=" + network.getId()); + logger.debug("Not applying userdata for nic {} in vm {} because it is not supported in network {}", nic, vmProfile, network); } return false; } @@ -3316,7 +3320,7 @@ public UserVm rebootVirtualMachine(RebootVMCmd cmd) throws InsufficientCapacityE _accountMgr.checkAccess(caller, null, true, vmInstance); - checkIfHostOfVMIsInPrepareForMaintenanceState(vmInstance.getHostId(), vmId, "Reboot"); + checkIfHostOfVMIsInPrepareForMaintenanceState(vmInstance, "Reboot"); // If the VM is Volatile in nature, on reboot discard the VM's root disk and create a new root disk for it: by 
calling restoreVM long serviceOfferingId = vmInstance.getServiceOfferingId(); @@ -3341,8 +3345,7 @@ public UserVm rebootVirtualMachine(RebootVMCmd cmd) throws InsufficientCapacityE for (NicVO nic : nics) { Network network = _networkModel.getNetwork(nic.getNetworkId()); if (_networkModel.isSharedNetworkWithoutServices(network.getId())) { - logger.debug("Adding vm " +vmId +" nic id "+ nic.getId() +" into vmIdCountMap as part of vm " + - "reboot for vm ip fetch "); + logger.debug("Adding vm {} nic {} into vmIdCountMap as part of vm reboot for vm ip fetch ", userVm, nic); vmIdCountMap.put(nic.getId(), new VmAndCountDetails(nic.getInstanceId(), VmIpFetchTrialMax.value())); } } @@ -3404,7 +3407,7 @@ public UserVm destroyVm(DestroyVMCmd cmd) throws ResourceUnavailableException, C } if (Arrays.asList(State.Destroyed, State.Expunging).contains(vm.getState()) && !expunge) { - logger.debug("Vm id=" + vmId + " is already destroyed"); + logger.debug("Vm {} is already destroyed", vm); return vm; } @@ -3421,11 +3424,11 @@ public UserVm destroyVm(DestroyVMCmd cmd) throws ResourceUnavailableException, C checkPluginsIfVmCanBeDestroyed(vm); // check if there are active volume snapshots tasks - logger.debug("Checking if there are any ongoing snapshots on the ROOT volumes associated with VM with ID " + vmId); - if (checkStatusOfVolumeSnapshots(vmId, Volume.Type.ROOT)) { + logger.debug("Checking if there are any ongoing snapshots on the ROOT volumes associated with VM {}", vm); + if (checkStatusOfVolumeSnapshots(vm, Volume.Type.ROOT)) { throw new CloudRuntimeException("There is/are unbacked up snapshot(s) on ROOT volume, vm destroy is not permitted, please try again later."); } - logger.debug("Found no ongoing snapshots on volume of type ROOT, for the vm with id " + vmId); + logger.debug("Found no ongoing snapshots on volume of type ROOT, for the vm {}", vm); List volumesToBeDeleted = getVolumesFromIds(cmd); @@ -3439,7 +3442,7 @@ public UserVm destroyVm(DestroyVMCmd cmd) throws 
ResourceUnavailableException, C // Detach all data disks from VM List dataVols = _volsDao.findByInstanceAndType(vmId, Volume.Type.DATADISK); - detachVolumesFromVm(dataVols); + detachVolumesFromVm(vm, dataVols); UserVm destroyedVm = destroyVm(vmId, expunge); if (expunge && !expunge(vm)) { @@ -3455,7 +3458,7 @@ public UserVm destroyVm(DestroyVMCmd cmd) throws ResourceUnavailableException, C if (rootVolume != null) { _volService.destroyVolume(rootVolume.getId()); } else { - logger.warn(String.format("Tried to destroy ROOT volume for VM [%s], but couldn't retrieve it.", vm.getUuid())); + logger.warn("Tried to destroy ROOT volume for VM [{}], but couldn't retrieve it.", vm); } } @@ -3493,7 +3496,7 @@ public InstanceGroupVO createVmGroup(CreateVMGroupCmd cmd) { boolean isNameInUse = _vmGroupDao.isNameInUse(accountId, groupName); if (isNameInUse) { - throw new InvalidParameterValueException("Unable to create vm group, a group with name " + groupName + " already exists for account " + accountId); + throw new InvalidParameterValueException(String.format("Unable to create vm group, a group with name %s already exists for account %s", groupName, owner)); } return createVmGroup(groupName, accountId); @@ -3575,7 +3578,7 @@ public boolean addInstanceToGroup(final long userVmId, String groupName) { if (group != null) { UserVm userVm = _vmDao.acquireInLockTable(userVmId); if (userVm == null) { - logger.warn("Failed to acquire lock on user vm id=" + userVmId); + logger.warn("Failed to acquire lock on user vm {} with id {}", vm, userVmId); } try { final InstanceGroupVO groupFinal = group; @@ -3586,8 +3589,8 @@ public void doInTransactionWithoutResult(TransactionStatus status) { // it. 
InstanceGroupVO ngrpLock = _vmGroupDao.lockRow(groupFinal.getId(), false); if (ngrpLock == null) { - logger.warn("Failed to acquire lock on vm group id=" + groupFinal.getId() + " name=" + groupFinal.getName()); - throw new CloudRuntimeException("Failed to acquire lock on vm group id=" + groupFinal.getId() + " name=" + groupFinal.getName()); + logger.warn("Failed to acquire lock on vm group {}", groupFinal); + throw new CloudRuntimeException(String.format("Failed to acquire lock on vm group %s", groupFinal)); } // Currently don't allow to assign a vm to more than one group @@ -3768,7 +3771,7 @@ public UserVm createAdvancedSecurityGroupVirtualMachine(DataCenter zone, Service } if (!_networkModel.isSecurityGroupSupportedInNetwork(network) && (ntwkOffering.getGuestType() != GuestType.L2)) { - throw new InvalidParameterValueException("Network is not security group enabled or not L2 network: " + network.getId()); + throw new InvalidParameterValueException(String.format("Network is not security group enabled or not L2 network: %s", network)); } _accountMgr.checkAccess(owner, AccessType.UseEntry, false, network); @@ -3883,7 +3886,7 @@ public UserVm createAdvancedVirtualMachine(DataCenter zone, ServiceOffering serv // don't allow to use system networks NetworkOffering networkOffering = _entityMgr.findById(NetworkOffering.class, network.getNetworkOfferingId()); if (networkOffering.isSystemOnly()) { - throw new InvalidParameterValueException("Network id=" + networkId + " is system only and can't be used for vm deployment"); + throw new InvalidParameterValueException(String.format("Network id=%s is system only and can't be used for vm deployment", network.getUuid())); } networkList.add(network); } @@ -3900,9 +3903,9 @@ public UserVm finalizeCreateVirtualMachine(long vmId) { logger.info("Loading UserVm " + vmId + " from DB"); UserVm userVm = getUserVm(vmId); if (userVm == null) { - logger.info("Loaded UserVm " + vmId + " (" + userVm.getUuid() + ") from DB"); + 
logger.warn("UserVm with {} does not exist in DB", vmId); } else { - logger.warn("UserVm " + vmId + " does not exist in DB"); + logger.info("Loaded UserVm {} from DB", userVm); } return userVm; } @@ -3930,7 +3933,7 @@ private NetworkVO getNetworkToAddToNetworkList(VirtualMachineTemplate template, // don't allow to use system networks NetworkOffering networkOffering = _entityMgr.findById(NetworkOffering.class, network.getNetworkOfferingId()); if (networkOffering.isSystemOnly()) { - throw new InvalidParameterValueException("Network id=" + networkId + " is system only and can't be used for vm deployment"); + throw new InvalidParameterValueException(String.format("Network id=%s is system only and can't be used for vm deployment", network.getUuid())); } return network; } @@ -3966,7 +3969,7 @@ private NetworkVO getDefaultNetwork(DataCenter zone, Account owner, boolean sele defaultNetwork = _networkDao.findById(virtualNetworks.get(0).getId()); } } else { - throw new InvalidParameterValueException("Required network offering id=" + requiredOfferings.get(0).getId() + " is not in " + NetworkOffering.State.Enabled); + throw new InvalidParameterValueException(String.format("Required network offering %s is not in %s", requiredOfferings.get(0), NetworkOffering.State.Enabled)); } return defaultNetwork; @@ -3982,7 +3985,7 @@ private NetworkVO createDefaultNetworkForAccount(DataCenter zone, Account owner, throw new InvalidParameterValueException("Unable to find physical network with id: " + physicalNetworkId + " and tag: " + requiredOfferings.get(0).getTags()); } - logger.debug("Creating network for account " + owner + " from the network offering id=" + requiredOfferings.get(0).getId() + " as a part of deployVM process"); + logger.debug("Creating network for account {} from the network offering {} as a part of deployVM process", owner, requiredOfferings.get(0)); Network newNetwork = _networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), owner.getAccountName() + "-network", 
owner.getAccountName() + "-network", null, null, null, false, null, owner, null, physicalNetwork, zone.getId(), ACLType.Account, null, null, null, null, true, null, null, null, null, null, null, null, null, null, null, null); @@ -4056,8 +4059,7 @@ private UserVm createVirtualMachine(DataCenter zone, ServiceOffering serviceOffe if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getId())) { throw new PermissionDeniedException( - "Cannot perform this operation, Zone is currently disabled: " - + zone.getId()); + String.format("Cannot perform this operation, Zone is currently disabled: %s", zone)); } // check if zone is dedicated @@ -4118,7 +4120,7 @@ private UserVm createVirtualMachine(DataCenter zone, ServiceOffering serviceOffe } UserVm vm = getCheckedUserVmResource(zone, hostName, displayName, owner, diskOfferingId, diskSize, networkList, securityGroupIdList, group, httpmethod, userData, userDataId, userDataDetails, sshKeyPairs, caller, requestedIps, defaultIps, isDisplayVm, keyboard, affinityGroupIdList, customParameters, customId, dhcpOptionMap, datadiskTemplateToDiskOfferringMap, userVmOVFPropertiesMap, dynamicScalingEnabled, vmType, template, hypervisorType, accountId, offering, isIso, rootDiskOfferingId, volumesSize, additionalDiskSize); - _securityGroupMgr.addInstanceToGroups(vm.getId(), securityGroupIdList); + _securityGroupMgr.addInstanceToGroups(vm, securityGroupIdList); if (affinityGroupIdList != null && !affinityGroupIdList.isEmpty()) { _affinityGroupVMMapDao.updateMap(vm.getId(), affinityGroupIdList); @@ -4204,20 +4206,16 @@ private UserVm getUncheckedUserVmResource(DataCenter zone, String hostName, Stri } long dataDiskTemplateId = datadiskTemplateToDiskOffering.getKey(); if (!dataDiskTemplate.getParentTemplateId().equals(template.getId())) { - throw new InvalidParameterValueException("Invalid Datadisk template. 
Specified Datadisk template" + dataDiskTemplateId - + " doesn't belong to template " + template.getId()); + throw new InvalidParameterValueException(String.format("Invalid Datadisk template. Specified Datadisk template %s doesn't belong to template %s", dataDiskTemplate, template)); } if (dataDiskOffering == null) { - throw new InvalidParameterValueException("Invalid disk offering id " + datadiskTemplateToDiskOffering.getValue().getId() + - " specified for datadisk template " + dataDiskTemplateId); + throw new InvalidParameterValueException(String.format("Invalid disk offering %s specified for datadisk template %s", datadiskTemplateToDiskOffering.getValue(), dataDiskTemplate)); } if (dataDiskOffering.isCustomized()) { - throw new InvalidParameterValueException("Invalid disk offering id " + dataDiskOffering.getId() + " specified for datadisk template " + - dataDiskTemplateId + ". Custom Disk offerings are not supported for Datadisk templates"); + throw new InvalidParameterValueException(String.format("Invalid disk offering %s specified for datadisk template %s. Custom Disk offerings are not supported for Datadisk templates", dataDiskOffering, dataDiskTemplate)); } if (dataDiskOffering.getDiskSize() < dataDiskTemplate.getSize()) { - throw new InvalidParameterValueException("Invalid disk offering id " + dataDiskOffering.getId() + " specified for datadisk template " + - dataDiskTemplateId + ". Disk offering size should be greater than or equal to the template size"); + throw new InvalidParameterValueException(String.format("Invalid disk offering %s specified for datadisk template %s. 
Disk offering size should be greater than or equal to the template size", dataDiskOffering, dataDiskTemplate)); } _templateDao.loadDetails(dataDiskTemplate); resourceLimitService.checkVolumeResourceLimit(owner, true, dataDiskOffering.getDiskSize(), dataDiskOffering); @@ -4269,15 +4267,15 @@ private UserVm getUncheckedUserVmResource(DataCenter zone, String hostName, Stri } if (template.getTemplateType().equals(TemplateType.SYSTEM) && !CKS_NODE.equals(vmType) && !SHAREDFSVM.equals(vmType)) { - throw new InvalidParameterValueException("Unable to use system template " + template.getId() + " to deploy a user vm"); + throw new InvalidParameterValueException(String.format("Unable to use system template %s to deploy a user vm", template)); } List listZoneTemplate = _templateZoneDao.listByZoneTemplate(zone.getId(), template.getId()); if (listZoneTemplate == null || listZoneTemplate.isEmpty()) { - throw new InvalidParameterValueException("The template " + template.getId() + " is not available for use"); + throw new InvalidParameterValueException(String.format("The template %s is not available for use", template)); } if (isIso && !template.isBootable()) { - throw new InvalidParameterValueException("Installing from ISO requires an ISO that is bootable: " + template.getId()); + throw new InvalidParameterValueException(String.format("Installing from ISO requires an ISO that is bootable: %s", template)); } // Check templates permissions @@ -4308,8 +4306,7 @@ private UserVm getUncheckedUserVmResource(DataCenter zone, String hostName, Stri for (NetworkVO network : networkList) { if ((network.getDataCenterId() != zone.getId())) { if (!network.isStrechedL2Network()) { - throw new InvalidParameterValueException("Network id=" + network.getId() + - " doesn't belong to zone " + zone.getId()); + throw new InvalidParameterValueException(String.format("Network %s doesn't belong to zone %s", network, zone)); } NetworkOffering ntwkOffering = 
_networkOfferingDao.findById(network.getNetworkOfferingId()); @@ -4358,7 +4355,7 @@ private UserVm getUncheckedUserVmResource(DataCenter zone, String hostName, Stri } if (template.isEnablePassword()) { - throw new InvalidParameterValueException(String.format("Unable to deploy VM as template %s is password enabled, but there is no support for %s service in the default network %s/%s", template.getId(), Service.UserData.getName(), network.getName(), network.getUuid())); + throw new InvalidParameterValueException(String.format("Unable to deploy VM as template %s is password enabled, but there is no support for %s service in the default network %s/%s", template, Service.UserData.getName(), network.getName(), network.getUuid())); } } } @@ -4935,17 +4932,18 @@ public void collectVmNetworkStatistics (final UserVm userVm) { List vmNames = new ArrayList(); vmNames.add(userVm.getInstanceName()); final HostVO host = _hostDao.findById(hostId); + Account account = _accountMgr.getAccount(userVm.getAccountId()); GetVmNetworkStatsAnswer networkStatsAnswer = null; try { networkStatsAnswer = (GetVmNetworkStatsAnswer) _agentMgr.easySend(hostId, new GetVmNetworkStatsCommand(vmNames, host.getGuid(), host.getName())); } catch (Exception e) { - logger.warn("Error while collecting network stats for vm: " + userVm.getHostName() + " from host: " + host.getName(), e); + logger.warn("Error while collecting network stats for vm: {} from host: {}", userVm, host, e); return; } if (networkStatsAnswer != null) { if (!networkStatsAnswer.getResult()) { - logger.warn("Error while collecting network stats vm: " + userVm.getHostName() + " from host: " + host.getName() + "; details: " + networkStatsAnswer.getDetails()); + logger.warn("Error while collecting network stats vm: {} from host: {}; details: {}", userVm, host, networkStatsAnswer.getDetails()); return; } try { @@ -4984,7 +4982,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } if (vmNetworkStat_lock == null) { - 
logger.warn("unable to find vm network stats from host for account: " + userVm.getAccountId() + " with vmId: " + userVm.getId()+ " and nicId:" + nic.getId()); + logger.warn("unable to find vm network stats from host for account: {} with vm: {} and nic: {}", account, userVm, nic); continue; } @@ -4992,16 +4990,15 @@ public void doInTransactionWithoutResult(TransactionStatus status) { && ((previousvmNetworkStats.getCurrentBytesSent() != vmNetworkStat_lock.getCurrentBytesSent()) || (previousvmNetworkStats.getCurrentBytesReceived() != vmNetworkStat_lock.getCurrentBytesReceived()))) { logger.debug("vm network stats changed from the time GetNmNetworkStatsCommand was sent. " + - "Ignoring current answer. Host: " + host.getName() + " . VM: " + vmNetworkStat.getVmName() + + "Ignoring current answer. Host: " + host + " . VM: " + vmNetworkStat.getVmName() + " Sent(Bytes): " + toHumanReadableSize(vmNetworkStat.getBytesSent()) + " Received(Bytes): " + toHumanReadableSize(vmNetworkStat.getBytesReceived())); continue; } if (vmNetworkStat_lock.getCurrentBytesSent() > vmNetworkStat.getBytesSent()) { if (logger.isDebugEnabled()) { - logger.debug("Sent # of bytes that's less than the last one. " + - "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmNetworkStat.getVmName() + - " Reported: " + toHumanReadableSize(vmNetworkStat.getBytesSent()) + " Stored: " + toHumanReadableSize(vmNetworkStat_lock.getCurrentBytesSent())); + logger.debug("Sent # of bytes that's less than the last one. Assuming something went wrong and persisting it. Host: {} . 
VM: {} Reported: {} Stored: {}", + host, vmNetworkStat.getVmName(), toHumanReadableSize(vmNetworkStat.getBytesSent()), toHumanReadableSize(vmNetworkStat_lock.getCurrentBytesSent())); } vmNetworkStat_lock.setNetBytesSent(vmNetworkStat_lock.getNetBytesSent() + vmNetworkStat_lock.getCurrentBytesSent()); } @@ -5009,9 +5006,8 @@ public void doInTransactionWithoutResult(TransactionStatus status) { if (vmNetworkStat_lock.getCurrentBytesReceived() > vmNetworkStat.getBytesReceived()) { if (logger.isDebugEnabled()) { - logger.debug("Received # of bytes that's less than the last one. " + - "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmNetworkStat.getVmName() + - " Reported: " + toHumanReadableSize(vmNetworkStat.getBytesReceived()) + " Stored: " + toHumanReadableSize(vmNetworkStat_lock.getCurrentBytesReceived())); + logger.debug("Received # of bytes that's less than the last one. Assuming something went wrong and persisting it. Host: {} . VM: {} Reported: {} Stored: {}", + host, vmNetworkStat.getVmName(), toHumanReadableSize(vmNetworkStat.getBytesReceived()), toHumanReadableSize(vmNetworkStat_lock.getCurrentBytesReceived())); } vmNetworkStat_lock.setNetBytesReceived(vmNetworkStat_lock.getNetBytesReceived() + vmNetworkStat_lock.getCurrentBytesReceived()); } @@ -5028,7 +5024,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } }); } catch (Exception e) { - logger.warn("Unable to update vm network statistics for vm: " + userVm.getId() + " from host: " + hostId, e); + logger.warn("Unable to update vm network statistics for vm: {} from host: {}", userVm, host, e); } } } @@ -5096,7 +5092,7 @@ private UserVm startVirtualMachine(long vmId, Long podId, Long clusterId, Long h } } catch (Exception e) { - logger.fatal("Unable to resize the data disk for vm " + vm.getDisplayName() + " due to " + e.getMessage(), e); + logger.fatal("Unable to resize the data disk for vm {} due to {}", vm, e.getMessage(), e); } } finally { @@ 
-5484,8 +5480,8 @@ public Pair> startVirtualMach UserVO callerUser = _userDao.findById(CallContext.current().getCallingUserId()); // if account is removed, return error - if (callerAccount != null && callerAccount.getRemoved() != null) { - throw new InvalidParameterValueException("The account " + callerAccount.getId() + " is removed"); + if (callerAccount == null || callerAccount.getRemoved() != null) { + throw new InvalidParameterValueException(String.format("The account %s is removed", callerAccount)); } UserVmVO vm = _vmDao.findById(vmId); @@ -5507,7 +5503,7 @@ public Pair> startVirtualMach } if (owner.getState() == Account.State.DISABLED) { - throw new PermissionDeniedException("The owner of " + vm + " is disabled: " + vm.getAccountId()); + throw new PermissionDeniedException(String.format("The owner of %s is disabled: %s", vm, owner)); } VMTemplateVO template = _templateDao.findByIdIncludingRemoved(vm.getTemplateId()); if (VirtualMachineManager.ResourceCountRunningVMsonly.value()) { @@ -5527,7 +5523,7 @@ public Pair> startVirtualMach if (defaultSecurityGroup != null) { List groupList = new ArrayList(); groupList.add(defaultSecurityGroup.getId()); - _securityGroupMgr.addInstanceToGroups(vmId, groupList); + _securityGroupMgr.addInstanceToGroups(vm, groupList); } } // Choose deployment planner @@ -5549,9 +5545,9 @@ public Pair> startVirtualMach if (!cpuCapabilityAndCapacity.first() || !cpuCapabilityAndCapacity.second()) { String errorMsg; if (!cpuCapabilityAndCapacity.first()) { - errorMsg = String.format("Cannot deploy the VM to specified host %d, requested CPU and speed is more than the host capability", hostId); + errorMsg = String.format("Cannot deploy the VM to specified host %s, requested CPU and speed is more than the host capability", destinationHost); } else { - errorMsg = String.format("Cannot deploy the VM to specified host %d, host does not have enough free CPU or RAM, please check the logs", hostId); + errorMsg = String.format("Cannot deploy the VM 
to specified host %s, host does not have enough free CPU or RAM, please check the logs", destinationHost); } logger.info(errorMsg); if (!AllowDeployVmIfGivenHostFails.value()) { @@ -5746,7 +5742,7 @@ public UserVm destroyVm(long vmId, boolean expunge) throws ResourceUnavailableEx } if (vm.getState() == State.Destroyed || vm.getState() == State.Expunging) { - logger.trace("Vm id=" + vmId + " is already destroyed"); + logger.trace("Vm {} is already destroyed", vm); return vm; } @@ -5815,17 +5811,18 @@ public void collectVmDiskStatistics(final UserVm userVm) { List vmNames = new ArrayList(); vmNames.add(userVm.getInstanceName()); final HostVO host = _hostDao.findById(hostId); + Account account = _accountMgr.getAccount(userVm.getAccountId()); GetVmDiskStatsAnswer diskStatsAnswer = null; try { diskStatsAnswer = (GetVmDiskStatsAnswer)_agentMgr.easySend(hostId, new GetVmDiskStatsCommand(vmNames, host.getGuid(), host.getName())); } catch (Exception e) { - logger.warn("Error while collecting disk stats for vm: " + userVm.getInstanceName() + " from host: " + host.getName(), e); + logger.warn("Error while collecting disk stats for vm: {} from host: {}", userVm, host, e); return; } if (diskStatsAnswer != null) { if (!diskStatsAnswer.getResult()) { - logger.warn("Error while collecting disk stats vm: " + userVm.getInstanceName() + " from host: " + host.getName() + "; details: " + diskStatsAnswer.getDetails()); + logger.warn("Error while collecting disk stats vm: {} from host: {}; details: {}", userVm, host, diskStatsAnswer.getDetails()); return; } try { @@ -5859,8 +5856,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } if (vmDiskStat_lock == null) { - logger.warn("unable to find vm disk stats from host for account: " + userVm.getAccountId() + " with vmId: " + userVm.getId() + " and volumeId:" - + volume.getId()); + logger.warn("unable to find vm disk stats from host for account: {} with vm: {} and volume: {}", account, userVm, volume); continue; } @@ 
-5869,41 +5865,53 @@ public void doInTransactionWithoutResult(TransactionStatus status) { .getCurrentIOWrite()) || (previousVmDiskStats.getCurrentBytesRead() != vmDiskStat_lock.getCurrentBytesRead()) || (previousVmDiskStats .getCurrentBytesWrite() != vmDiskStat_lock.getCurrentBytesWrite())))) { - logger.debug("vm disk stats changed from the time GetVmDiskStatsCommand was sent. " + "Ignoring current answer. Host: " + host.getName() - + " . VM: " + vmDiskStat.getVmName() + " IO Read: " + vmDiskStat.getIORead() + " IO Write: " + vmDiskStat.getIOWrite() + " Bytes Read: " - + vmDiskStat.getBytesRead() + " Bytes Write: " + vmDiskStat.getBytesWrite()); + logger.debug("vm disk stats changed from the time" + + " GetVmDiskStatsCommand was sent. Ignoring current " + + "answer. Host: {} . VM: {} IO Read: {} IO Write: {} " + + "Bytes Read: {} Bytes Write: {}", + host, vmDiskStat, vmDiskStat.getIORead(), vmDiskStat.getIOWrite(), + vmDiskStat.getBytesRead(), vmDiskStat.getBytesWrite()); continue; } if (vmDiskStat_lock.getCurrentIORead() > vmDiskStat.getIORead()) { if (logger.isDebugEnabled()) { - logger.debug("Read # of IO that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() - + " . VM: " + vmDiskStat.getVmName() + " Reported: " + vmDiskStat.getIORead() + " Stored: " + vmDiskStat_lock.getCurrentIORead()); + logger.debug("Read # of IO that's less than " + + "the last one. Assuming something went wrong and " + + "persisting it. Host: {} . VM: {} Reported: {} Stored: {}", + host, vmDiskStat, vmDiskStat.getIORead(), vmDiskStat_lock.getCurrentIORead()); } vmDiskStat_lock.setNetIORead(vmDiskStat_lock.getNetIORead() + vmDiskStat_lock.getCurrentIORead()); } vmDiskStat_lock.setCurrentIORead(vmDiskStat.getIORead()); if (vmDiskStat_lock.getCurrentIOWrite() > vmDiskStat.getIOWrite()) { if (logger.isDebugEnabled()) { - logger.debug("Write # of IO that's less than the last one. " + "Assuming something went wrong and persisting it. 
Host: " + host.getName() - + " . VM: " + vmDiskStat.getVmName() + " Reported: " + vmDiskStat.getIOWrite() + " Stored: " + vmDiskStat_lock.getCurrentIOWrite()); + logger.debug("Write # of IO that's less than " + + "the last one. Assuming something went wrong and " + + "persisting it. Host: {}. VM: {} Reported: {} Stored: {}", + host, vmDiskStat, vmDiskStat.getIOWrite(), vmDiskStat_lock.getCurrentIOWrite()); } vmDiskStat_lock.setNetIOWrite(vmDiskStat_lock.getNetIOWrite() + vmDiskStat_lock.getCurrentIOWrite()); } vmDiskStat_lock.setCurrentIOWrite(vmDiskStat.getIOWrite()); if (vmDiskStat_lock.getCurrentBytesRead() > vmDiskStat.getBytesRead()) { if (logger.isDebugEnabled()) { - logger.debug("Read # of Bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() - + " . VM: " + vmDiskStat.getVmName() + " Reported: " + toHumanReadableSize(vmDiskStat.getBytesRead()) + " Stored: " + toHumanReadableSize(vmDiskStat_lock.getCurrentBytesRead())); + logger.debug("Read # of Bytes that's less " + + "than the last one. Assuming something went wrong and" + + " persisting it. Host: {} . VM: {} Reported: {} Stored: {}", + host, vmDiskStat, toHumanReadableSize(vmDiskStat.getBytesRead()), + toHumanReadableSize(vmDiskStat_lock.getCurrentBytesRead())); } vmDiskStat_lock.setNetBytesRead(vmDiskStat_lock.getNetBytesRead() + vmDiskStat_lock.getCurrentBytesRead()); } vmDiskStat_lock.setCurrentBytesRead(vmDiskStat.getBytesRead()); if (vmDiskStat_lock.getCurrentBytesWrite() > vmDiskStat.getBytesWrite()) { if (logger.isDebugEnabled()) { - logger.debug("Write # of Bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() - + " . VM: " + vmDiskStat.getVmName() + " Reported: " + toHumanReadableSize(vmDiskStat.getBytesWrite()) + " Stored: " - + toHumanReadableSize(vmDiskStat_lock.getCurrentBytesWrite())); + logger.debug("Write # of Bytes that's less " + + "than the last one. 
Assuming something went wrong " + + "and persisting it. Host: {} . VM: {} Reported: {} Stored: {}", + host, vmDiskStat, toHumanReadableSize(vmDiskStat.getBytesWrite()), + toHumanReadableSize(vmDiskStat_lock.getCurrentBytesWrite())); } vmDiskStat_lock.setNetBytesWrite(vmDiskStat_lock.getNetBytesWrite() + vmDiskStat_lock.getCurrentBytesWrite()); } @@ -5922,7 +5930,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } }); } catch (Exception e) { - logger.warn(String.format("Unable to update VM disk statistics for %s from %s", userVm.getInstanceName(), host), e); + logger.warn("Unable to update VM disk statistics for {} from {}", userVm, host, e); } } } @@ -5945,7 +5953,7 @@ public UserVm expungeVm(long vmId) throws ResourceUnavailableException, Concurre } if (vm.getRemoved() != null) { - logger.trace("Vm id=" + vmId + " is already expunged"); + logger.trace("Vm {} is already expunged", vm); return vm; } @@ -6127,7 +6135,7 @@ public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityE throw new InvalidParameterValueException("Unable to find disk offering " + diskOfferingId); } if (diskOffering.isComputeOnly()) { - throw new InvalidParameterValueException(String.format("The disk offering id %d provided is directly mapped to a service offering, please provide an individual disk offering", diskOfferingId)); + throw new InvalidParameterValueException(String.format("The disk offering %s provided is directly mapped to a service offering, please provide an individual disk offering", diskOffering)); } } @@ -6212,7 +6220,7 @@ public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityE List child_templates = _templateDao.listByParentTemplatetId(templateId); for (VMTemplateVO tmpl: child_templates){ if (tmpl.getFormat() == Storage.ImageFormat.ISO){ - logger.info("MDOV trying to attach disk to the VM " + tmpl.getId() + " vmid=" + vm.getId()); + logger.info("MDOV trying to attach disk {} to the VM {}", tmpl, vm); 
_tmplService.attachIso(tmpl.getId(), vm.getId(), true); } } @@ -6221,7 +6229,7 @@ public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityE String extraConfig = cmd.getExtraConfig(); if (StringUtils.isNotBlank(extraConfig)) { if (EnableAdditionalVmConfig.valueIn(callerId)) { - logger.info("Adding extra configuration to user vm: " + vm.getUuid()); + logger.info("Adding extra configuration to user vm: {}", vm); addExtraConfig(vm, extraConfig); } else { throw new InvalidParameterValueException("attempted setting extraconfig but enable.additional.vm.configuration is disabled"); @@ -6712,7 +6720,7 @@ public VirtualMachine migrateVirtualMachine(Long vmId, Host destinationHost) thr throw ex; } - checkIfHostOfVMIsInPrepareForMaintenanceState(vm.getHostId(), vmId, "Migrate"); + checkIfHostOfVMIsInPrepareForMaintenanceState(vm, "Migrate"); if(serviceOfferingDetailsDao.findDetail(vm.getServiceOfferingId(), GPU.Keys.pciDevice.toString()) != null) { throw new InvalidParameterValueException("Live Migration of GPU enabled VM is not supported"); @@ -6840,19 +6848,17 @@ private DeployDestination checkVmMigrationDestination(VMInstanceVO vm, Host srcH // check max guest vm limit for the destinationHost if (_capacityMgr.checkIfHostReachMaxGuestLimit(destinationHostVO)) { if (logger.isDebugEnabled()) { - logger.debug("Host name: " + destinationHost.getName() + ", hostId: " + destinationHost.getId() - + " already has max Running VMs(count includes system VMs), cannot migrate to this host"); + logger.debug("Host: {} already has max Running VMs(count includes system VMs), cannot migrate to this host", destinationHost); } - throw new VirtualMachineMigrationException("Destination host, hostId: " + destinationHost.getId() - + " already has max Running VMs(count includes system VMs), cannot migrate to this host"); + throw new VirtualMachineMigrationException(String.format("Destination host: %s already has max Running VMs(count includes system VMs), cannot migrate to 
this host", destinationHost)); } //check if there are any ongoing volume snapshots on the volumes associated with the VM. Long vmId = vm.getId(); - logger.debug("Checking if there are any ongoing snapshots volumes associated with VM with ID " + vmId); - if (checkStatusOfVolumeSnapshots(vmId, null)) { + logger.debug("Checking if there are any ongoing snapshots volumes associated with VM {}", vm); + if (checkStatusOfVolumeSnapshots(vm, null)) { throw new CloudRuntimeException("There is/are unbacked up snapshot(s) on volume(s) attached to this VM, VM Migration is not permitted, please try again later."); } - logger.debug("Found no ongoing snapshots on volumes associated with the vm with id " + vmId); + logger.debug("Found no ongoing snapshots on volumes associated with the vm {}", vm); return dest; } @@ -6880,14 +6886,15 @@ private boolean checkIfHostIsDedicated(HostVO host) { } } - private void checkIfHostOfVMIsInPrepareForMaintenanceState(Long hostId, Long vmId, String operation) { + private void checkIfHostOfVMIsInPrepareForMaintenanceState(VirtualMachine vm, String operation) { + long hostId = vm.getHostId(); HostVO host = _hostDao.findById(hostId); if (host.getResourceState() != ResourceState.PrepareForMaintenance) { return; } - logger.debug("Host is in PrepareForMaintenance state - " + operation + " VM operation on the VM id: " + vmId + " is not allowed"); - throw new InvalidParameterValueException(operation + " VM operation on the VM id: " + vmId + " is not allowed as host is preparing for maintenance mode"); + logger.debug("Host is in PrepareForMaintenance state - {} VM operation on the VM: {} is not allowed", operation, vm); + throw new InvalidParameterValueException(String.format("%s VM operation on the VM: %s is not allowed as host is preparing for maintenance mode", operation, vm)); } private Long accountOfDedicatedHost(HostVO host) { @@ -6932,14 +6939,14 @@ public void checkHostsDedication(VMInstanceVO vm, long srcHostId, long destHostI //if srcHost is 
explicitly dedicated and destination Host is not if (srcExplDedicated && !destExplDedicated) { //raise an alert - String msg = "VM is being migrated from a explicitly dedicated host " + srcHost.getName() + " to non-dedicated host " + destHost.getName(); + String msg = String.format("VM is being migrated from a explicitly dedicated host %s to non-dedicated host %s", srcHost, destHost); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); logger.warn(msg); } //if srcHost is non dedicated but destination Host is explicitly dedicated if (!srcExplDedicated && destExplDedicated) { //raise an alert - String msg = "VM is being migrated from a non dedicated host " + srcHost.getName() + " to a explicitly dedicated host " + destHost.getName(); + String msg = String.format("VM is being migrated from a non dedicated host %s to a explicitly dedicated host %s", srcHost, destHost); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); logger.warn(msg); } @@ -6947,14 +6954,14 @@ public void checkHostsDedication(VMInstanceVO vm, long srcHostId, long destHostI //if hosts are dedicated to different account/domains, raise an alert if (srcExplDedicated && destExplDedicated) { if (!((accountOfDedicatedHost(srcHost) == null) || (accountOfDedicatedHost(srcHost).equals(accountOfDedicatedHost(destHost))))) { - String msg = "VM is being migrated from host " + srcHost.getName() + " explicitly dedicated to account " + accountOfDedicatedHost(srcHost) + " to host " - + destHost.getName() + " explicitly dedicated to account " + accountOfDedicatedHost(destHost); + String msg = String.format("VM is being migrated from host %s explicitly dedicated to account %d to host %s explicitly dedicated to account %d", + srcHost, accountOfDedicatedHost(srcHost), destHost, accountOfDedicatedHost(destHost)); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, 
vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); logger.warn(msg); } if (!((domainOfDedicatedHost(srcHost) == null) || (domainOfDedicatedHost(srcHost).equals(domainOfDedicatedHost(destHost))))) { - String msg = "VM is being migrated from host " + srcHost.getName() + " explicitly dedicated to domain " + domainOfDedicatedHost(srcHost) + " to host " - + destHost.getName() + " explicitly dedicated to domain " + domainOfDedicatedHost(destHost); + String msg = String.format("VM is being migrated from host %s explicitly dedicated to domain %d to host %s explicitly dedicated to domain %d", + srcHost, domainOfDedicatedHost(srcHost), destHost, domainOfDedicatedHost(destHost)); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); logger.warn(msg); } @@ -6965,7 +6972,7 @@ public void checkHostsDedication(VMInstanceVO vm, long srcHostId, long destHostI if (deployPlanner.getDeploymentPlanner() != null && deployPlanner.getDeploymentPlanner().equals("ImplicitDedicationPlanner")) { //VM is deployed using implicit planner long accountOfVm = vm.getAccountId(); - String msg = "VM of account " + accountOfVm + " with implicit deployment planner being migrated to host " + destHost.getName(); + String msg = String.format("VM of account %d with implicit deployment planner being migrated to host %s", accountOfVm, destHost); //Get all vms on destination host boolean emptyDestination = false; List vmsOnDest = getVmsOnHost(destHostId); @@ -6978,8 +6985,7 @@ public void checkHostsDedication(VMInstanceVO vm, long srcHostId, long destHostI if (!isServiceOfferingUsingPlannerInPreferredMode(vm.getServiceOfferingId())) { //Check if all vms on destination host are created using strict implicit mode if (!checkIfAllVmsCreatedInStrictMode(accountOfVm, vmsOnDest)) { - msg = "VM of account " + accountOfVm + " with strict implicit deployment planner being migrated to host " + destHost.getName() - + " not having all vms strict 
implicitly dedicated to account " + accountOfVm; + msg = String.format("VM of account %d with preferred implicit deployment planner being migrated to host %s not having all vms implicitly dedicated to account %d", accountOfVm, destHost, accountOfVm); } } } @@ -7012,15 +7017,15 @@ public void checkHostsDedication(VMInstanceVO vm, long srcHostId, long destHostI } if (srcImplDedicated) { if (destImplDedicated) { - msg = "VM is being migrated from implicitly dedicated host " + srcHost.getName() + " to another implicitly dedicated host " + destHost.getName(); + msg = String.format("VM is being migrated from implicitly dedicated host %s to another implicitly dedicated host %s", srcHost, destHost); } else { - msg = "VM is being migrated from implicitly dedicated host " + srcHost.getName() + " to shared host " + destHost.getName(); + msg = String.format("VM is being migrated from implicitly dedicated host %s to shared host %s", srcHost, destHost); } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(),
vm.getPodIdToDeployIn(), msg, msg); logger.warn(msg); } else { if (destImplDedicated) { - msg = "VM is being migrated from shared host " + srcHost.getName() + " to implicitly dedicated host " + destHost.getName(); + msg = String.format("VM is being migrated from shared host %s to implicitly dedicated host %s", srcHost, destHost); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); logger.warn(msg); } @@ -7063,11 +7068,13 @@ private boolean checkIfAllVmsCreatedInStrictMode(Long accountId, List requestedIPv6ForNics = new HashMap<>(); if (_networkModel.checkSecurityGroupSupportForNetwork(newAccount, zone, networkIdList, securityGroupIdList)) { // advanced zone with security groups // cleanup the old security groups - _securityGroupMgr.removeInstanceFromGroups(cmd.getVmId()); + _securityGroupMgr.removeInstanceFromGroups(vm); // if networkIdList is null and the first network of vm is shared network, then keep it if possible if (networkIdList == null || networkIdList.isEmpty()) { NicVO defaultNicOld = _nicDao.findDefaultNicForVM(vm.getId()); @@ -7630,7 +7637,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { applicableNetworks.add(defaultNetworkOld); requestedIPv4ForNics.put(defaultNetworkOld.getId(), defaultNicOld.getIPv4Address()); requestedIPv6ForNics.put(defaultNetworkOld.getId(), defaultNicOld.getIPv6Address()); - logger.debug("AssignVM: use old shared network " + defaultNetworkOld.getName() + " with old ip " + defaultNicOld.getIPv4Address() + " on default nic of vm:" + vm.getInstanceName()); + logger.debug("AssignVM: use old shared network {} with old ip {} on default nic of vm: {}", defaultNetworkOld, defaultNicOld.getIPv4Address(), vm); } } } @@ -7662,10 +7669,10 @@ public void doInTransactionWithoutResult(TransactionStatus status) { if (nicOld != null) { requestedIPv4ForNics.put(network.getId(), nicOld.getIPv4Address()); requestedIPv6ForNics.put(network.getId(), 
nicOld.getIPv6Address()); - logger.debug("AssignVM: use old shared network " + network.getName() + " with old ip " + nicOld.getIPv4Address() + " on nic of vm:" + vm.getInstanceName()); + logger.debug("AssignVM: use old shared network {} with old ip {} on nic of vm: {}", network, nicOld.getIPv4Address(), vm); } } - logger.debug("AssignVM: Added network " + network.getName() + " to vm " + vm.getId()); + logger.debug("AssignVM: Added network {} to vm {}", network, vm); applicableNetworks.add(network); } } @@ -7740,11 +7747,8 @@ public void doInTransactionWithoutResult(TransactionStatus status) { _networkMgr.allocate(vmProfile, networks, null); } - _securityGroupMgr.addInstanceToGroups(vm.getId(), - securityGroupIdList); - logger.debug("AssignVM: Advanced zone, adding security groups no " - + securityGroupIdList.size() + " to " - + vm.getInstanceName()); + _securityGroupMgr.addInstanceToGroups(vm, securityGroupIdList); + logger.debug("AssignVM: Advanced zone, adding security groups no {} to {}", securityGroupIdList.size(), vm); } else { if (securityGroupIdList != null && !securityGroupIdList.isEmpty()) { @@ -7759,7 +7763,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { applicableNetworks.add(defaultNetworkOld); requestedIPv4ForNics.put(defaultNetworkOld.getId(), defaultNicOld.getIPv4Address()); requestedIPv6ForNics.put(defaultNetworkOld.getId(), defaultNicOld.getIPv6Address()); - logger.debug("AssignVM: use old shared network " + defaultNetworkOld.getName() + " with old ip " + defaultNicOld.getIPv4Address() + " on default nic of vm:" + vm.getInstanceName()); + logger.debug("AssignVM: use old shared network {} with old ip {} on default nic of vm: {}", defaultNetworkOld, defaultNicOld.getIPv4Address(), vm); } } } @@ -7789,10 +7793,10 @@ public void doInTransactionWithoutResult(TransactionStatus status) { if (nicOld != null) { requestedIPv4ForNics.put(network.getId(), nicOld.getIPv4Address()); requestedIPv6ForNics.put(network.getId(), 
nicOld.getIPv6Address()); - logger.debug("AssignVM: use old shared network " + network.getName() + " with old ip " + nicOld.getIPv4Address() + " on nic of vm:" + vm.getInstanceName()); + logger.debug("AssignVM: use old shared network {} with old ip {} on nic of vm: {}", network, nicOld.getIPv4Address(), vm); } } - logger.debug("AssignVM: Added network " + network.getName() + " to vm " + vm.getId()); + logger.debug("AssignVM: Added network {} to vm {}", network, vm); applicableNetworks.add(network); } } else if (applicableNetworks.isEmpty()) { @@ -7814,8 +7818,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { throw new InvalidParameterValueException("Unable to find physical network with id: " + physicalNetworkId + " and tag: " + requiredOfferings.get(0).getTags()); } - logger.debug("Creating network for account " + newAccount + " from the network offering id=" + requiredOfferings.get(0).getId() - + " as a part of deployVM process"); + logger.debug("Creating network for account {} from the network offering {} as a part of deployVM process", newAccount, requiredOfferings.get(0)); Network newNetwork = _networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), newAccount.getAccountName() + "-network", newAccount.getAccountName() + "-network", null, null, null, false, null, newAccount, null, physicalNetwork, zone.getId(), ACLType.Account, null, null, @@ -7850,7 +7853,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { defaultNetwork = _networkDao.findById(virtualNetworks.get(0).getId()); } } else { - throw new InvalidParameterValueException("Required network offering id=" + requiredOfferings.get(0).getId() + " is not in " + NetworkOffering.State.Enabled); + throw new InvalidParameterValueException(String.format("Required network offering %s is not in %s", requiredOfferings.get(0), NetworkOffering.State.Enabled)); } applicableNetworks.add(defaultNetwork); @@ -7876,10 +7879,10 @@ public void 
doInTransactionWithoutResult(TransactionStatus status) { VirtualMachine vmi = _itMgr.findById(vm.getId()); VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vmi); _networkMgr.allocate(vmProfile, networks, null); - logger.debug("AssignVM: Advance virtual, adding networks no " + networks.size() + " to " + vm.getInstanceName()); + logger.debug("AssignVM: Advance virtual, adding networks no {} to {}", networks.size(), vm); } // END IF NON SEC GRP ENABLED } // END IF ADVANCED - logger.info("AssignVM: vm " + vm.getInstanceName() + " now belongs to account " + newAccount.getAccountName()); + logger.info("AssignVM: vm {} now belongs to account {}", vm, newAccount); return vm; } @@ -7891,7 +7894,7 @@ private boolean canAccountUseNetwork(Account newAccount, Network network) { _networkModel.checkNetworkPermissions(newAccount, network); return true; } catch (PermissionDeniedException e) { - logger.debug(String.format("AssignVM: %s network %s can not be used by new account %s", network.getGuestType(), network.getName(), newAccount.getAccountName())); + logger.debug("AssignVM: {} network {} can not be used by new account {}", network.getGuestType(), network, newAccount); return false; } } @@ -7962,11 +7965,11 @@ public UserVm restoreVM(RestoreVMCmd cmd) throws InsufficientCapacityException, } //check if there are any active snapshots on volumes associated with the VM - logger.debug("Checking if there are any ongoing snapshots on the ROOT volumes associated with VM with ID " + vmId); - if (checkStatusOfVolumeSnapshots(vmId, Volume.Type.ROOT)) { + logger.debug("Checking if there are any ongoing snapshots on the ROOT volumes associated with VM {}", vm); + if (checkStatusOfVolumeSnapshots(vm, Volume.Type.ROOT)) { throw new CloudRuntimeException("There is/are unbacked up snapshot(s) on ROOT volume, Re-install VM is not permitted, please try again later."); } - logger.debug("Found no ongoing snapshots on volume of type ROOT, for the vm with id " + vmId); + 
logger.debug("Found no ongoing snapshots on volume of type ROOT, for the vm {}", vm); return restoreVMInternal(caller, vm, newTemplateId, rootDiskOfferingId, expunge, details); } @@ -8036,7 +8039,7 @@ public UserVm restoreVirtualMachine(final Account caller, final long vmId, final } if (owner.getState() == Account.State.DISABLED) { - throw new PermissionDeniedException("The owner of " + vm + " is disabled: " + vm.getAccountId()); + throw new PermissionDeniedException(String.format("The owner of %s is disabled: %s", vm, owner)); } if (vm.getState() != VirtualMachine.State.Running && vm.getState() != VirtualMachine.State.Stopped) { @@ -8071,7 +8074,7 @@ public UserVm restoreVirtualMachine(final Account caller, final long vmId, final try { checkRestoreVmFromTemplate(vm, template, rootVols, diskOffering, details); } catch (ResourceAllocationException e) { - logger.error("Failed to restore VM " + vm.getUuid() + " due to " + e.getMessage(), e); + logger.error("Failed to restore VM {} due to {}", vm, e.getMessage(), e); throw new CloudRuntimeException("Failed to restore VM " + vm.getUuid() + " due to " + e.getMessage(), e); } @@ -8079,7 +8082,7 @@ public UserVm restoreVirtualMachine(final Account caller, final long vmId, final try { _itMgr.stop(vm.getUuid()); } catch (ResourceUnavailableException e) { - logger.debug("Stop vm " + vm.getUuid() + " failed", e); + logger.debug("Stop vm {} failed", vm, e); CloudRuntimeException ex = new CloudRuntimeException("Stop vm failed for specified vmId"); ex.addProxyObject(vm.getUuid(), "vmId"); throw ex; @@ -8130,7 +8133,7 @@ public Pair doInTransaction(final TransactionStatus status) th } catch (final CloudRuntimeException e) { throw e; } catch (final Exception e) { - logger.error("Unable to restore VM " + userVm.getUuid(), e); + logger.error("Unable to restore VM {}", userVm, e); throw new CloudRuntimeException(e); } @@ -8162,12 +8165,12 @@ public Pair doInTransaction(final TransactionStatus status) th if (vm.getHypervisorType() == 
HypervisorType.VMware) { VolumeInfo volumeInStorage = volFactory.getVolume(root.getId()); if (volumeInStorage != null) { - logger.info("Expunging volume " + root.getId() + " from primary data store"); + logger.info("Expunging volume {} from primary data store", root); AsyncCallFuture future = _volService.expungeVolumeAsync(volFactory.getVolume(root.getId())); try { future.get(); } catch (Exception e) { - logger.debug("Failed to expunge volume:" + root.getId(), e); + logger.debug("Failed to expunge volume: {}", root, e); } } } @@ -8214,7 +8217,7 @@ public Pair doInTransaction(final TransactionStatus status) th } } - logger.debug("Restore VM " + vmId + " done successfully"); + logger.debug("Restore VM {} done successfully", vm); return vm; } @@ -8418,7 +8421,7 @@ else if (host.getHypervisorType() == HypervisorType.KVM) { if (!cmds.isSuccessful()) { for (Answer answer : cmds.getAnswers()) { if (!answer.getResult()) { - logger.warn("Failed to reset vm due to: " + answer.getDetails()); + logger.warn("Failed to reset vm {} due to: {}", vm, answer.getDetails()); throw new CloudRuntimeException("Unable to reset " + vm + " due to " + answer.getDetails()); } @@ -8459,12 +8462,12 @@ private void handleTargetsForVMware(long hostId, String storageAddress, int stor cmd.setAdd(false); cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC); - sendModifyTargetsCommand(cmd, hostId); + sendModifyTargetsCommand(cmd, host); } } - private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { - Answer answer = _agentMgr.easySend(hostId, cmd); + private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, HostVO host) { + Answer answer = _agentMgr.easySend(host.getId(), cmd); if (answer == null) { String msg = "Unable to get an answer to the modify targets command"; @@ -8472,7 +8475,7 @@ private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { logger.warn(msg); } else if (!answer.getResult()) { - String msg = "Unable to modify 
target on the following host: " + hostId; + String msg = String.format("Unable to modify target on the following host: %s", host); logger.warn(msg); } @@ -8511,7 +8514,7 @@ public void persistDeviceBusInfo(UserVmVO vm, String rootDiskController) { vm.setDetail(VmDetailConstants.ROOT_DISK_CONTROLLER, rootDiskController); _vmDao.saveDetails(vm); if (logger.isDebugEnabled()) { - logger.debug("Persisted device bus information rootDiskController=" + rootDiskController + " for vm: " + vm.getDisplayName()); + logger.debug("Persisted device bus information rootDiskController={} for vm: {}", rootDiskController, vm); } } } @@ -8550,7 +8553,8 @@ public boolean isDisplayResourceEnabled(Long vmId) { return true; } - private boolean checkStatusOfVolumeSnapshots(long vmId, Volume.Type type) { + private boolean checkStatusOfVolumeSnapshots(VirtualMachine vm, Volume.Type type) { + long vmId = vm.getId(); List listVolumes = null; if (type == Volume.Type.ROOT) { listVolumes = _volsDao.findByInstanceAndType(vmId, type); @@ -8559,13 +8563,13 @@ private boolean checkStatusOfVolumeSnapshots(long vmId, Volume.Type type) { } else { listVolumes = _volsDao.findByInstance(vmId); } - logger.debug("Found "+listVolumes.size()+" no. of volumes of type "+type+" for vm with VM ID "+vmId); + logger.debug("Found {} no. 
of volumes of type {} for vm with VM ID {}", listVolumes.size(), type, vm); for (VolumeVO volume : listVolumes) { Long volumeId = volume.getId(); - logger.debug("Checking status of snapshots for Volume with Volume Id: "+volumeId); + logger.debug("Checking status of snapshots for Volume with Volume: {}", volume); List ongoingSnapshots = _snapshotDao.listByStatus(volumeId, Snapshot.State.Creating, Snapshot.State.CreatedOnPrimary, Snapshot.State.BackingUp); int ongoingSnapshotsCount = ongoingSnapshots.size(); - logger.debug("The count of ongoing Snapshots for VM with ID "+vmId+" and disk type "+type+" is "+ongoingSnapshotsCount); + logger.debug("The count of ongoing Snapshots for VM {} and disk type {} is {}", vm, type, ongoingSnapshotsCount); if (ongoingSnapshotsCount > 0) { logger.debug("Found "+ongoingSnapshotsCount+" no. of snapshots, on volume of type "+type+", which snapshots are not yet backed up"); return true; @@ -8603,7 +8607,7 @@ private void validateVolumes(List volumes) { } } - private void detachVolumesFromVm(List volumes) { + private void detachVolumesFromVm(UserVm vm, List volumes) { for (VolumeVO volume : volumes) { // Create new context and inject correct event resource type, id and details, @@ -8622,7 +8626,7 @@ private void detachVolumesFromVm(List volumes) { } if (detachResult == null) { - logger.error("DestroyVM remove volume - failed to detach and delete volume " + volume.getInstanceId() + " from instance " + volume.getId()); + logger.error("DestroyVM remove volume - failed to detach and delete volume {} from instance {}", volume, vm); } } } @@ -8645,7 +8649,7 @@ private void destroyVolumeInContext(UserVmVO vm, boolean expunge, VolumeVO volum Volume result = _volumeService.destroyVolume(volume.getId(), CallContext.current().getCallingAccount(), expunge, false); if (result == null) { - logger.error(String.format("DestroyVM remove volume - failed to delete volume %s from instance %s", volume.getId(), volume.getInstanceId())); + 
logger.error("DestroyVM remove volume - failed to delete volume {} from instance {}", volume, vm); } } finally { // Remove volumeContext and pop vmContext back @@ -8694,7 +8698,7 @@ public boolean unmanageUserVM(Long vmId) { boolean result; try { if (vm.getState() != State.Running && vm.getState() != State.Stopped) { - logger.debug("VM ID = " + vmId + " is not running or stopped, cannot be unmanaged"); + logger.debug("VM {} is not running or stopped, cannot be unmanaged", vm); return false; } @@ -8709,14 +8713,14 @@ public boolean unmanageUserVM(Long vmId) { result = _itMgr.unmanage(vm.getUuid()); if (result) { - cleanupUnmanageVMResources(vm.getId()); + cleanupUnmanageVMResources(vm); unmanageVMFromDB(vm.getId()); publishUnmanageVMUsageEvents(vm, volumes); } else { throw new CloudRuntimeException("Error while unmanaging VM: " + vm.getUuid()); } } catch (Exception e) { - logger.error("Could not unmanage VM " + vm.getUuid(), e); + logger.error("Could not unmanage VM {}", vm, e); throw new CloudRuntimeException(e); } finally { _vmDao.releaseFromLockTable(vm.getId()); @@ -8736,9 +8740,9 @@ private void publishUnmanageVMUsageEvents(UserVmVO vm, List volumes) { /* Cleanup the VM from resources and groups */ - private void cleanupUnmanageVMResources(long vmId) { - cleanupVmResources(vmId); - removeVMFromAffinityGroups(vmId); + private void cleanupUnmanageVMResources(UserVmVO vm) { + cleanupVmResources(vm); + removeVMFromAffinityGroups(vm.getId()); } private void unmanageVMFromDB(long vmId) { @@ -8804,21 +8808,19 @@ private void postProcessingUnmanageVMVolumes(List volumes, UserVmVO vm } private void checkUnmanagingVMOngoingVolumeSnapshots(UserVmVO vm) { - logger.debug("Checking if there are any ongoing snapshots on the ROOT volumes associated with VM with ID " + vm.getId()); - if (checkStatusOfVolumeSnapshots(vm.getId(), Volume.Type.ROOT)) { + logger.debug("Checking if there are any ongoing snapshots on the ROOT volumes associated with VM {}", vm); + if 
(checkStatusOfVolumeSnapshots(vm, Volume.Type.ROOT)) { throw new CloudRuntimeException("There is/are unbacked up snapshot(s) on ROOT volume, vm unmanage is not permitted, please try again later."); } - logger.debug("Found no ongoing snapshots on volume of type ROOT, for the vm with id " + vm.getId()); + logger.debug("Found no ongoing snapshots on volume of type ROOT, for the vm {}", vm); } private void checkUnmanagingVMVolumes(UserVmVO vm, List volumes) { for (VolumeVO volume : volumes) { if (volume.getInstanceId() == null || !volume.getInstanceId().equals(vm.getId())) { - throw new CloudRuntimeException("Invalid state for volume with ID " + volume.getId() + " of VM " + - vm.getId() +": it is not attached to VM"); + throw new CloudRuntimeException(String.format("Invalid state for volume %s of VM %s: it is not attached to VM", volume, vm)); } else if (volume.getVolumeType() != Volume.Type.ROOT && volume.getVolumeType() != Volume.Type.DATADISK) { - throw new CloudRuntimeException("Invalid type for volume with ID " + volume.getId() + - ": ROOT or DATADISK expected but got " + volume.getVolumeType()); + throw new CloudRuntimeException(String.format("Invalid type for volume %s: ROOT or DATADISK expected but got %s", volume, volume.getVolumeType())); } } } @@ -8866,7 +8868,7 @@ private void collectVmDiskAndNetworkStatistics(Long vmId, State expectedState) { if (uservm != null) { collectVmDiskAndNetworkStatistics(uservm, expectedState); } else { - logger.info(String.format("Skip collecting vm %s disk and network statistics as it is not user vm", uservm)); + logger.info("Skip collecting vmId {} disk and network statistics as it is not user vm", vmId); } } diff --git a/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java b/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java index 2061367cf4d4..8d43875190f4 100644 --- a/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java +++ 
b/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java @@ -428,7 +428,7 @@ public VMSnapshot allocVMSnapshot(Long vmId, String vsDisplayName, String vsDesc return createAndPersistVMSnapshot(userVmVo, vsDescription, vmSnapshotName, vsDisplayName, vmSnapshotType); } catch (Exception e) { String msg = e.getMessage(); - logger.error("Create vm snapshot record failed for vm: " + vmId + " due to: " + msg); + logger.error("Create vm snapshot record failed for vm: {} due to: {}", userVmVo, msg); } return null; } @@ -494,7 +494,7 @@ private VMSnapshotStrategy findVMSnapshotStrategy(VMSnapshot vmSnapshot) { VMSnapshotStrategy snapshotStrategy = storageStrategyFactory.getVmSnapshotStrategy(vmSnapshot); if (snapshotStrategy == null) { - throw new CloudRuntimeException("can't find vm snapshot strategy for vmsnapshot: " + vmSnapshot.getId()); + throw new CloudRuntimeException(String.format("can't find vm snapshot strategy for vmsnapshot: %s", vmSnapshot)); } return snapshotStrategy; @@ -572,7 +572,7 @@ private VMSnapshot orchestrateCreateVMSnapshot(Long vmId, Long vmSnapshotId, Boo VolumeVO rootVolume = volumeVos.get(0); if(!rootVolume.getState().equals(Volume.State.Ready)) { - throw new CloudRuntimeException("Create vm to snapshot failed due to vm: " + vmId + " has root disk in " + rootVolume.getState() + " state"); + throw new CloudRuntimeException("Create vm to snapshot failed due to vm: " + userVm + " has root disk in " + rootVolume.getState() + " state"); } VMSnapshotVO vmSnapshot = _vmSnapshotDao.findById(vmSnapshotId); @@ -587,7 +587,7 @@ private VMSnapshot orchestrateCreateVMSnapshot(Long vmId, Long vmSnapshotId, Boo VMSnapshot snapshot = strategy.takeVMSnapshot(vmSnapshot); return snapshot; } catch (Exception e) { - String errMsg = String.format("Failed to create vm snapshot: [%s] due to: %s", vmSnapshotId, e.getMessage()); + String errMsg = String.format("Failed to create vm snapshot: [%s] due to: %s", vmSnapshot, e.getMessage()); 
logger.debug(errMsg, e); throw new CloudRuntimeException(errMsg, e); } } @@ -618,14 +618,14 @@ public boolean deleteVMSnapshot(Long vmSnapshotId) { // check VM snapshot states, only allow to delete vm snapshots in created and error state if (VMSnapshot.State.Ready != vmSnapshot.getState() && VMSnapshot.State.Expunging != vmSnapshot.getState() && VMSnapshot.State.Error != vmSnapshot.getState()) { - throw new InvalidParameterValueException("Can't delete the vm snapshotshot " + vmSnapshotId + " due to it is not in Created or Error, or Expunging State"); + throw new InvalidParameterValueException(String.format("Can't delete the vm snapshot %s due to it is not in Created or Error, or Expunging State", vmSnapshot)); } // check if there are other active VM snapshot tasks if (hasActiveVMSnapshotTasks(vmSnapshot.getVmId())) { List expungingSnapshots = _vmSnapshotDao.listByInstanceId(vmSnapshot.getVmId(), VMSnapshot.State.Expunging); if (expungingSnapshots.size() > 0 && expungingSnapshots.get(0).getId() == vmSnapshot.getId()) - logger.debug("Target VM snapshot already in expunging state, go on deleting it: " + vmSnapshot.getDisplayName()); + logger.debug("Target VM snapshot already in expunging state, go on deleting it: {}", vmSnapshot); else throw new InvalidParameterValueException("There is other active vm snapshot tasks on the instance, please try again later"); } @@ -683,14 +683,14 @@ private boolean orchestrateDeleteVMSnapshot(Long vmSnapshotId) { List validStates = Arrays.asList(VMSnapshot.State.Ready, VMSnapshot.State.Expunging, VMSnapshot.State.Error, VMSnapshot.State.Allocated); // check VM snapshot states, only allow to delete vm snapshots in ready, expunging, allocated and error state if (!validStates.contains(vmSnapshot.getState())) { - throw new InvalidParameterValueException("Can't delete the vm snapshot " + vmSnapshotId + " due to it is not in " + validStates.toString() + "States"); + throw new InvalidParameterValueException(String.format("Can't delete the 
vm snapshot %s due to it is not in %s States", vmSnapshot, validStates.toString())); } // check if there are other active VM snapshot tasks if (hasActiveVMSnapshotTasks(vmSnapshot.getVmId())) { List expungingSnapshots = _vmSnapshotDao.listByInstanceId(vmSnapshot.getVmId(), VMSnapshot.State.Expunging); if (expungingSnapshots.size() > 0 && expungingSnapshots.get(0).getId() == vmSnapshot.getId()) - logger.debug("Target VM snapshot already in expunging state, go on deleting it: " + vmSnapshot.getDisplayName()); + logger.debug("Target VM snapshot already in expunging state, go on deleting it: {}", vmSnapshot); else throw new InvalidParameterValueException("There is other active vm snapshot tasks on the instance, please try again later"); } @@ -703,7 +703,7 @@ private boolean orchestrateDeleteVMSnapshot(Long vmSnapshotId) { VMSnapshotStrategy strategy = findVMSnapshotStrategy(vmSnapshot); return strategy.deleteVMSnapshot(vmSnapshot); } catch (Exception e) { - logger.debug("Failed to delete vm snapshot: " + vmSnapshotId, e); + logger.debug("Failed to delete vm snapshot: {}", vmSnapshot, e); return false; } } @@ -722,7 +722,7 @@ public UserVm revertToSnapshot(Long vmSnapshotId) throws InsufficientCapacityExc UserVmVO userVm = _userVMDao.findById(vmId); // check if VM exists if (userVm == null) { - throw new InvalidParameterValueException("Revert vm to snapshot: " + vmSnapshotId + " failed due to vm: " + vmId + " is not found"); + throw new InvalidParameterValueException(String.format("Revert vm to snapshot: %s failed due to vm: %d is not found", vmSnapshotVo, vmId)); } // check if there are other active VM snapshot tasks @@ -828,11 +828,11 @@ protected Map getVmMapDetails(UserVm userVm) { */ protected void changeUserVmServiceOffering(UserVm userVm, VMSnapshotVO vmSnapshotVo) { Map vmDetails = getVmMapDetails(userVm); - boolean result = upgradeUserVmServiceOffering(userVm.getId(), vmSnapshotVo.getServiceOfferingId(), vmDetails); + boolean result = 
upgradeUserVmServiceOffering(userVm, vmSnapshotVo.getServiceOfferingId(), vmDetails); if (! result){ throw new CloudRuntimeException("VM Snapshot reverting failed due to vm service offering couldn't be changed to the one used when snapshot was taken"); } - logger.debug("Successfully changed service offering to " + vmSnapshotVo.getServiceOfferingId() + " for vm " + userVm.getId()); + logger.debug("Successfully changed service offering to {} for vm {}", _serviceOfferingDao.findById(vmSnapshotVo.getServiceOfferingId()), userVm); } /** @@ -842,16 +842,16 @@ protected void changeUserVmServiceOffering(UserVm userVm, VMSnapshotVO vmSnapsho * @param details vm details * @return if operation was successful */ - protected boolean upgradeUserVmServiceOffering(Long vmId, Long serviceOfferingId, Map details) { + protected boolean upgradeUserVmServiceOffering(UserVm vm, Long serviceOfferingId, Map details) { boolean result; try { - result = _userVmManager.upgradeVirtualMachine(vmId, serviceOfferingId, details); + result = _userVmManager.upgradeVirtualMachine(vm.getId(), serviceOfferingId, details); if (! 
result){ - logger.error("Couldn't change service offering for vm " + vmId + " to " + serviceOfferingId); + logger.error("Couldn't change service offering for vm {} to {}", vm, _serviceOfferingDao.findById(serviceOfferingId)); } return result; } catch (ConcurrentOperationException | ResourceUnavailableException | ManagementServerException | VirtualMachineMigrationException e) { - logger.error("Couldn't change service offering for vm " + vmId + " to " + serviceOfferingId + " due to: " + e.getMessage()); + logger.error("Couldn't change service offering for vm {} to {} due to: {}", vm, _serviceOfferingDao.findById(serviceOfferingId), e.getMessage()); return false; } } @@ -868,9 +868,7 @@ private UserVm orchestrateRevertToVMSnapshot(Long vmSnapshotId) throws Insuffici final UserVmVO userVm = _userVMDao.findById(vmId); // check if VM exists if (userVm == null) { - throw new InvalidParameterValueException("Revert vm to snapshot: " - + vmSnapshotId + " failed due to vm: " + vmId - + " is not found"); + throw new InvalidParameterValueException(String.format("Revert vm to snapshot: %s failed due to vm: %d is not found", vmSnapshotVo, vmId)); } // check if there are other active VM snapshot tasks @@ -901,7 +899,7 @@ private UserVm orchestrateRevertToVMSnapshot(Long vmSnapshotId) throws Insuffici vm = _userVMDao.findById(userVm.getId()); hostId = vm.getHostId(); } catch (Exception e) { - logger.error("Start VM " + userVm.getInstanceName() + " before reverting failed due to " + e.getMessage()); + logger.error("Start VM {} before reverting failed due to {}", userVm, e.getMessage()); throw new CloudRuntimeException(e.getMessage()); } } else { @@ -909,7 +907,7 @@ private UserVm orchestrateRevertToVMSnapshot(Long vmSnapshotId) throws Insuffici try { _itMgr.advanceStop(userVm.getUuid(), true); } catch (Exception e) { - logger.error("Stop VM " + userVm.getInstanceName() + " before reverting failed due to " + e.getMessage()); + logger.error("Stop VM {} before reverting failed due to 
{}", userVm, e.getMessage()); throw new CloudRuntimeException(e.getMessage()); } } @@ -932,7 +930,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) throws CloudR }); return userVm; } catch (Exception e) { - logger.debug("Failed to revert vmsnapshot: " + vmSnapshotId, e); + logger.debug("Failed to revert vmsnapshot: {}", vmSnapshotVo, e); throw new CloudRuntimeException(e.getMessage()); } } @@ -1372,12 +1370,12 @@ public boolean deleteVMSnapshotsFromDB(Long vmId, boolean unmanage) { try { VMSnapshotStrategy strategy = findVMSnapshotStrategy(snapshot); if (! strategy.deleteVMSnapshotFromDB(snapshot, unmanage)) { - logger.error("Couldn't delete vm snapshot with id " + snapshot.getId()); + logger.error("Couldn't delete vm snapshot {}", snapshot); return false; } } catch (CloudRuntimeException e) { - logger.error("Couldn't delete vm snapshot due to: " + e.getMessage()); + logger.error("Couldn't delete vm snapshot {} due to: {}", snapshot, e.getMessage()); } } return true; diff --git a/server/src/main/java/org/apache/cloudstack/acl/ProjectRoleManagerImpl.java b/server/src/main/java/org/apache/cloudstack/acl/ProjectRoleManagerImpl.java index 01fc96473d23..91bbb349a07e 100644 --- a/server/src/main/java/org/apache/cloudstack/acl/ProjectRoleManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/acl/ProjectRoleManagerImpl.java @@ -160,7 +160,7 @@ public ProjectRole findProjectRole(Long roleId, Long projectId) { return null; } if (!(role.getProjectId().equals(projectId))) { - logger.warn(String.format("Project role : %s doesn't belong to the project" + role.getName())); + logger.warn("Project role: {} doesn't belong to the project", role); return null; } return role; diff --git a/server/src/main/java/org/apache/cloudstack/acl/RoleManagerImpl.java b/server/src/main/java/org/apache/cloudstack/acl/RoleManagerImpl.java index 60e7093c48b3..87b119542c57 100644 --- a/server/src/main/java/org/apache/cloudstack/acl/RoleManagerImpl.java +++ 
b/server/src/main/java/org/apache/cloudstack/acl/RoleManagerImpl.java @@ -107,7 +107,7 @@ public Role findRole(Long id, boolean ignorePrivateRoles) { return null; } if (!isCallerRootAdmin() && (RoleType.Admin == role.getRoleType() || (!role.isPublicRole() && ignorePrivateRoles))) { - logger.debug(String.format("Role [id=%s, name=%s] is either of 'Admin' type or is private and is only visible to 'Root admins'.", id, role.getName())); + logger.debug("Role [{}] is either of 'Admin' type or is private and is only visible to 'Root admins'.", role); return null; } return role; @@ -128,7 +128,7 @@ public List findRoles(List ids, boolean ignorePrivateRoles) { } for (Role role : roles) { if (!isCallerRootAdmin() && (RoleType.Admin == role.getRoleType() || (!role.isPublicRole() && ignorePrivateRoles))) { - logger.debug(String.format("Role [id=%s, name=%s] is either of 'Admin' type or is private and is only visible to 'Root admins'.", role.getId(), role.getName())); + logger.debug("Role [{}] is either of 'Admin' type or is private and is only visible to 'Root admins'.", role); continue; } result.add(role); diff --git a/server/src/main/java/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java b/server/src/main/java/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java index 0ec16f1e748f..d098ef25652b 100644 --- a/server/src/main/java/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java @@ -160,7 +160,7 @@ public AffinityGroup createAffinityGroup(final String accountName, final Long pr AffinityGroupVO group = createAffinityGroup(processor, owner, aclType, affinityGroupName, affinityGroupType, description, domainLevel, domainId); if (logger.isDebugEnabled()) { - logger.debug("Created affinity group =" + affinityGroupName); + logger.debug("Created affinity group {}", group); } CallContext.current().putContextParameter(AffinityGroup.class, group.getUuid()); @@ -260,7 
+260,7 @@ public boolean deleteAffinityGroup(Long affinityGroupId, String account, Long pr _messageBus.publish(_name, EntityManager.MESSAGE_REMOVE_ENTITY_EVENT, PublishScope.LOCAL, params); if (logger.isDebugEnabled()) { - logger.debug("Deleted affinity group id=" + affinityGroupIdFinal); + logger.debug("Deleted affinity group {}", group); } return true; } @@ -445,12 +445,14 @@ public UserVm updateVMAffinityGroups(Long vmId, List affinityGroupIds) { Account caller = CallContext.current().getCallingAccount(); Account owner = _accountMgr.getAccount(vmInstance.getAccountId()); + List affinityGroupList = new ArrayList<>(); // check that the affinity groups exist for (Long affinityGroupId : affinityGroupIds) { AffinityGroupVO ag = _affinityGroupDao.findById(affinityGroupId); if (ag == null) { throw new InvalidParameterValueException("Unable to find affinity group by id " + affinityGroupId); } else { + affinityGroupList.add(ag); // verify permissions if (ag.getAclType() == ACLType.Domain) { _accountMgr.checkAccess(caller, null, false, owner, ag); @@ -475,7 +477,7 @@ public UserVm updateVMAffinityGroups(Long vmId, List affinityGroupIds) { } _affinityGroupVMMapDao.updateMap(vmId, affinityGroupIds); if (logger.isDebugEnabled()) { - logger.debug("Updated VM :" + vmId + " affinity groups to =" + affinityGroupIds); + logger.debug("Updated VM {} affinity groups to {}", vmInstance, affinityGroupList); } // APIResponseHelper will pull out the updated affinitygroups. 
return vmInstance; diff --git a/server/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImpl.java b/server/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImpl.java index 8b05a76d0a96..97e503974cf2 100644 --- a/server/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImpl.java @@ -164,7 +164,7 @@ private void conditionallyAddHost(List agentBasedHosts, Host host) { // would be {ResourceState.Creating, ResourceState.Error}; if (!allowedStates.contains(host.getResourceState())) { if (logger.isTraceEnabled()) { - logger.trace(String.format("host is in '%s' state, not adding to the host list, (id = %s)", host.getResourceState(), host.getUuid())); + logger.trace("host ({}) is in '{}' state, not adding to the host list", host, host.getResourceState()); } return; } @@ -174,7 +174,7 @@ private void conditionallyAddHost(List agentBasedHosts, Host host) { && host.getType() != Host.Type.SecondaryStorage && host.getType() != Host.Type.SecondaryStorageVM) { if (logger.isTraceEnabled()) { - logger.trace(String.format("host is of wrong type, not adding to the host list, (id = %s, type = %s)", host.getUuid(), host.getType())); + logger.trace(String.format("host (%s) is of wrong type, not adding to the host list, type = %s", host, host.getType())); } return; } @@ -183,7 +183,7 @@ private void conditionallyAddHost(List agentBasedHosts, Host host) { && ! 
(host.getHypervisorType() == Hypervisor.HypervisorType.KVM || host.getHypervisorType() == Hypervisor.HypervisorType.LXC)) { if (logger.isTraceEnabled()) { - logger.trace(String.format("hypervisor is not the right type, not adding to the host list, (id = %s, hypervisortype = %s)", host.getUuid(), host.getHypervisorType())); + logger.trace(String.format("hypervisor is not the right type, not adding to the host list, (host: %s, hypervisortype: %s)", host, host.getHypervisorType())); } return; } diff --git a/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java b/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java index 6e13ba135df3..37b1797724d1 100644 --- a/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java @@ -191,7 +191,7 @@ public List listBackupProviderOfferings(final Long zoneId) { throw new PermissionDeniedException("Parameter external can only be specified by a Root Admin, permission denied"); } final BackupProvider backupProvider = getBackupProvider(zoneId); - logger.debug("Listing external backup offerings for the backup provider configured for zone ID " + zoneId); + logger.debug("Listing external backup offerings for the backup provider configured for zone ID {}", dataCenterDao.findById(zoneId)); return backupProvider.listBackupOfferings(zoneId); } @@ -403,8 +403,7 @@ public boolean removeVMFromBackupOffering(final Long vmId, final boolean forced) result = true; } } catch (Exception e) { - logger.error(String.format("Exception caught when trying to remove VM [uuid: %s, name: %s] from the backup offering [uuid: %s, name: %s] due to: [%s].", - vm.getUuid(), vm.getInstanceName(), offering.getUuid(), offering.getName(), e.getMessage()), e); + logger.error("Exception caught when trying to remove VM [{}] from the backup offering [{}] due to: [{}].", vm, offering, e.getMessage(), e); } return result; } @@ -794,8 +793,8 @@ 
public boolean restoreBackupVolumeAndAttachToVM(final String backedUpVolumeUuid, HostVO host = restoreInfo.first(); StoragePoolVO datastore = restoreInfo.second(); - logger.debug("Asking provider to restore volume " + backedUpVolumeUuid + " from backup " + backupId + - " (with external ID " + backup.getExternalId() + ") and attach it to VM: " + vm.getUuid()); + logger.debug("Asking provider to restore volume {} from backup {} (with external" + + " ID {}) and attach it to VM: {}", backedUpVolumeUuid, backup, backup.getExternalId(), vm); logger.debug(String.format("Trying to restore volume using host private IP address: [%s].", host.getPrivateIpAddress())); @@ -913,7 +912,7 @@ private boolean attachVolumeToVM(Long zoneId, String restoredVolumeLocation, Lis } volumeInfo.setType(Volume.Type.DATADISK); - logger.debug("Attaching the restored volume to VM " + vm.getId()); + logger.debug("Attaching the restored volume to VM {}", vm); StoragePoolVO pool = primaryDataStoreDao.findByUuid(datastoreUuid); try { return guru.attachRestoredVolumeToVirtualMachine(zoneId, restoredVolumeLocation, volumeInfo, vm, pool.getId(), backup); @@ -1080,8 +1079,10 @@ private void checkStatusOfCurrentlyExecutingBackups() { logger.debug("Next backup scheduled time for VM ID " + backupSchedule.getVmId() + " is " + nextScheduledTime); break; default: - logger.debug(String.format("Found async backup job [id: %s, vmId: %s] with status [%s] and cmd information: [cmd: %s, cmdInfo: %s].", asyncJob.getId(), backupSchedule.getVmId(), - asyncJob.getStatus(), asyncJob.getCmd(), asyncJob.getCmdInfo())); + logger.debug("Found async backup job [id: {}, uuid: {}, vmId: {}] with " + + "status [{}] and cmd information: [cmd: {}, cmdInfo: {}].", + asyncJob.getId(), asyncJob.getUuid(), backupSchedule.getVmId(), + asyncJob.getStatus(), asyncJob.getCmd(), asyncJob.getCmdInfo()); break; } } @@ -1114,15 +1115,15 @@ public void scheduleBackups() { final Account backupAccount = 
accountService.getAccount(vm.getAccountId()); if (backupAccount == null || backupAccount.getState() == Account.State.DISABLED) { - logger.debug(String.format("Skip backup for VM [uuid: %s, name: %s] since its account has been removed or disabled.", vm.getUuid(), vm.getInstanceName())); + logger.debug("Skip backup for VM ({}) since its account has been removed or disabled.", vm); continue; } if (logger.isDebugEnabled()) { final Date scheduledTimestamp = backupSchedule.getScheduledTimestamp(); displayTime = DateUtil.displayDateInTimezone(DateUtil.GMT_TIMEZONE, scheduledTimestamp); - logger.debug(String.format("Scheduling 1 backup for VM [ID: %s, name: %s, hostName: %s] for backup schedule id: [%s] at [%s].", - vm.getId(), vm.getInstanceName(), vm.getHostName(), backupSchedule.getId(), displayTime)); + logger.debug(String.format("Scheduling 1 backup for VM (%s) for backup schedule (%s) at [%s].", + vm, backupSchedule, displayTime)); } BackupScheduleVO tmpBackupScheduleVO = null; @@ -1219,19 +1220,19 @@ protected void runInContext() { } for (final DataCenter dataCenter : dataCenterDao.listAllZones()) { if (dataCenter == null || isDisabled(dataCenter.getId())) { - logger.debug(String.format("Backup Sync Task is not enabled in zone [%s]. Skipping this zone!", dataCenter == null ? "NULL Zone!" : dataCenter.getId())); + logger.debug("Backup Sync Task is not enabled in zone [{}]. Skipping this zone!", dataCenter == null ? "NULL Zone!" 
: dataCenter); continue; } final BackupProvider backupProvider = getBackupProvider(dataCenter.getId()); if (backupProvider == null) { - logger.warn("Backup provider not available or configured for zone ID " + dataCenter.getId()); + logger.warn("Backup provider not available or configured for zone {}", dataCenter); continue; } List vms = vmInstanceDao.listByZoneWithBackups(dataCenter.getId(), null); if (vms == null || vms.isEmpty()) { - logger.debug(String.format("Can't find any VM to sync backups in zone [id: %s].", dataCenter.getId())); + logger.debug("Can't find any VM to sync backups in zone {}", dataCenter); continue; } @@ -1256,7 +1257,7 @@ private void tryToSyncVMBackups(BackupProvider backupProvider, Map fields = new ArrayList<>(); @@ -1310,7 +1312,7 @@ public BackupOffering updateBackupOffering(UpdateBackupOfferingCmd updateBackupO } if (!backupOfferingDao.update(id, offering)) { - logger.warn(String.format("Couldn't update Backup offering [id: %s] with [%s].", id, String.join(", ", fields))); + logger.warn(String.format("Couldn't update Backup offering (%s) with [%s].", backupOfferingVO, String.join(", ", fields))); } BackupOfferingVO response = backupOfferingDao.findById(id); diff --git a/server/src/main/java/org/apache/cloudstack/ca/CAManagerImpl.java b/server/src/main/java/org/apache/cloudstack/ca/CAManagerImpl.java index d4ccac69d5fa..22f8939e7eb7 100644 --- a/server/src/main/java/org/apache/cloudstack/ca/CAManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/ca/CAManagerImpl.java @@ -195,8 +195,8 @@ public boolean provisionCertificate(final Host host, final Boolean reconnect, fi final Certificate certificate = issueCertificate(csr, Arrays.asList(host.getName(), host.getPrivateIpAddress()), Arrays.asList(host.getPrivateIpAddress(), host.getPublicIpAddress(), host.getStorageIpAddress()), CAManager.CertValidityPeriod.value(), caProvider); return deployCertificate(host, certificate, reconnect, null); } catch (final AgentUnavailableException | 
OperationTimedoutException e) { - logger.error("Host/agent is not available or operation timed out, failed to setup keystore and generate CSR for host/agent id=" + host.getId() + ", due to: ", e); - throw new CloudRuntimeException("Failed to generate keystore and get CSR from the host/agent id=" + host.getId()); + logger.error("Host/agent is not available or operation timed out, failed to setup keystore and generate CSR for host/agent {}, due to: ", host, e); + throw new CloudRuntimeException(String.format("Failed to generate keystore and get CSR from the host/agent %s", host)); } } @@ -234,11 +234,11 @@ public boolean deployCertificate(final Host host, final Certificate certificate, if (answer.getResult()) { getActiveCertificatesMap().put(host.getPrivateIpAddress(), certificate.getClientCertificate()); if (sshAccessDetails == null && reconnect != null && reconnect) { - logger.info(String.format("Successfully setup certificate on host, reconnecting with agent with id=%d, name=%s, address=%s", host.getId(), host.getName(), host.getPublicIpAddress())); + logger.info("Successfully setup certificate on host, reconnecting with agent [{}] with address={}", host, host.getPublicIpAddress()); try { agentManager.reconnect(host.getId()); } catch (AgentUnavailableException | CloudRuntimeException e) { - logger.debug("Error when reconnecting to host: " + host.getUuid(), e); + logger.debug("Error when reconnecting to host: {}", host, e); } } return true; diff --git a/server/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManagerImpl.java b/server/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManagerImpl.java index 124ca05cc376..a69bbcd1ee98 100644 --- a/server/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManagerImpl.java @@ -222,8 +222,8 @@ public ConsoleEndpoint generateConsoleEndpoint(Long vmId, String extraSecurityTo String sessionUuid = 
UUID.randomUUID().toString(); return generateAccessEndpoint(vmId, sessionUuid, extraSecurityToken, clientAddress); } catch (Exception e) { - String errorMsg = String.format("Unexepected exception in ConsoleAccessManager - vmId: %s, clientAddress: %s", - vmId, clientAddress); + String errorMsg = String.format("Unexpected exception in ConsoleAccessManager - vmId: %s (%s), clientAddress: %s", + vmId, entityManager.findById(VirtualMachine.class, vmId), clientAddress); logger.error(errorMsg, e); return new ConsoleEndpoint(false, null, "Server Internal Error: " + e.getMessage()); } @@ -264,15 +264,17 @@ protected boolean checkSessionPermission(VirtualMachine vm, Account account) { } catch (PermissionDeniedException ex) { if (accountManager.isNormalUser(account.getId())) { if (logger.isDebugEnabled()) { - logger.debug("VM access is denied for VM ID " + vm.getUuid() + ". VM owner account " + - vm.getAccountId() + " does not match the account id in session " + - account.getId() + " and caller is a normal user"); + logger.debug("VM access is denied for VM {}. VM owner " + + "account {} does not match the account id in session {} and " + + "caller is a normal user", vm, + accountManager.getAccount(vm.getAccountId()), account); } } else if ((accountManager.isDomainAdmin(account.getId()) || account.getType() == Account.Type.READ_ONLY_ADMIN) && logger.isDebugEnabled()) { - logger.debug("VM access is denied for VM ID " + vm.getUuid() + ". VM owner account " + - vm.getAccountId() + " does not match the account id in session " + - account.getId() + " and the domain-admin caller does not manage the target domain"); + logger.debug("VM access is denied for VM {}. 
VM owner account {}" + + " does not match the account id in session {} and the " + + "domain-admin caller does not manage the target domain", + vm, accountManager.getAccount(vm.getAccountId()), account); } return false; } @@ -300,23 +302,22 @@ private ConsoleEndpoint generateAccessEndpoint(Long vmId, String sessionUuid, St throw new CloudRuntimeException(msg); } - String vmUuid = vm.getUuid(); if (unsupportedConsoleVMState.contains(vm.getState())) { - msg = "VM " + vmUuid + " must be running to connect console, sending blank response for console access request"; + msg = String.format("VM %s must be running to connect console, sending blank response for console access request", vm); logger.warn(msg); throw new CloudRuntimeException(msg); } Long hostId = vm.getState() != VirtualMachine.State.Migrating ? vm.getHostId() : vm.getLastHostId(); if (hostId == null) { - msg = "VM " + vmUuid + " lost host info, sending blank response for console access request"; + msg = String.format("VM %s lost host info, sending blank response for console access request", vm); logger.warn(msg); throw new CloudRuntimeException(msg); } HostVO host = managementServer.getHostBy(hostId); if (host == null) { - msg = "VM " + vmUuid + "'s host does not exist, sending blank response for console access request"; + msg = String.format("Host for VM %s does not exist, sending blank response for console access request", vm); logger.warn(msg); throw new CloudRuntimeException(msg); } @@ -569,7 +570,7 @@ private String getEncryptorPassword() { private void setWebsocketUrl(VirtualMachine vm, ConsoleProxyClientParam param) { String ticket = acquireVncTicketForVmwareVm(vm); if (StringUtils.isBlank(ticket)) { - logger.error("Could not obtain VNC ticket for VM " + vm.getInstanceName()); + logger.error(String.format("Could not obtain VNC ticket for VM %s", vm)); return; } String wsUrl = composeWebsocketUrlForVmwareVm(ticket, param); @@ -590,7 +591,7 @@ private String composeWebsocketUrlForVmwareVm(String ticket, 
ConsoleProxyClientP */ private String acquireVncTicketForVmwareVm(VirtualMachine vm) { try { - logger.info("Acquiring VNC ticket for VM = " + vm.getHostName()); + logger.info("Acquiring VNC ticket for VM = {}", vm); GetVmVncTicketCommand cmd = new GetVmVncTicketCommand(vm.getInstanceName()); Answer answer = agentManager.send(vm.getHostId(), cmd); GetVmVncTicketAnswer ans = (GetVmVncTicketAnswer) answer; diff --git a/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java b/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java index 3e6c460a1692..57321fa09103 100644 --- a/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java @@ -149,7 +149,7 @@ public Map runDiagnosticsCommand(final RunDiagnosticsCmd cmd) { final Map accessDetails = networkManager.getSystemVMAccessDetails(vmInstance); if (StringUtils.isEmpty(accessDetails.get(NetworkElementCommand.ROUTER_IP))) { - throw new CloudRuntimeException("Unable to set system vm ControlIP for system vm with ID: " + vmId); + throw new CloudRuntimeException("Unable to set system vm ControlIP for system vm: " + vmInstance); } command.setAccessDetail(accessDetails); @@ -227,7 +227,7 @@ public String getDiagnosticsDataCommand(GetDiagnosticsDataCmd cmd) { final long zoneId = vmInstance.getDataCenterId(); VMInstanceVO ssvm = getSecondaryStorageVmInZone(zoneId); if (ssvm == null) { - throw new CloudRuntimeException("No SSVM found in zone with ID: " + zoneId); + throw new CloudRuntimeException("No SSVM found in zone: " + dataCenterDao.findById(zoneId)); } // Secondary Storage install path = "diagnostics_data/diagnostics_files_xxxx.tar @@ -265,7 +265,7 @@ private Pair copyZipFileToSecondaryStorage(VMInstanceVO vmInsta private void configureNetworkElementCommand(NetworkElementCommand cmd, VMInstanceVO vmInstance) { Map accessDetails = 
networkManager.getSystemVMAccessDetails(vmInstance); if (StringUtils.isBlank(accessDetails.get(NetworkElementCommand.ROUTER_IP))) { - throw new CloudRuntimeException("Unable to set system vm ControlIP for system vm with ID: " + vmInstance.getId()); + throw new CloudRuntimeException(String.format("Unable to set system vm ControlIP for system vm: %s", vmInstance)); } cmd.setAccessDetail(accessDetails); } @@ -282,10 +282,10 @@ private Answer deleteDiagnosticsZipFileInsystemVm(VMInstanceVO vmInstance, Strin configureNetworkElementCommand(cmd, vmInstance); final Answer fileCleanupAnswer = agentManager.easySend(vmInstance.getHostId(), cmd); if (fileCleanupAnswer == null) { - logger.error(String.format("Failed to cleanup diagnostics zip file on vm: %s", vmInstance.getUuid())); + logger.error("Failed to cleanup diagnostics zip file on vm: {}", vmInstance); } else { if (!fileCleanupAnswer.getResult()) { - logger.error(String.format("Zip file cleanup for vm %s has failed with: %s", vmInstance.getUuid(), fileCleanupAnswer.getDetails())); + logger.error("Zip file cleanup for vm {} has failed with: {}", vmInstance, fileCleanupAnswer.getDetails()); } } @@ -328,7 +328,7 @@ private Pair copyToSecondaryStorageVMware(final DataStore store boolean success = false; String mountPoint = mountManager.getMountPoint(store.getUri(), imageStoreDetailsUtil.getNfsVersion(store.getId())); if (StringUtils.isBlank(mountPoint)) { - logger.error("Failed to generate mount point for copying to secondary storage for " + store.getName()); + logger.error("Failed to generate mount point for copying to secondary storage for {}", store); return new Pair<>(false, "Failed to mount secondary storage:" + store.getName()); } @@ -371,7 +371,7 @@ private VMInstanceVO getSecondaryStorageVmInZone(Long zoneId) { private DataStore getImageStore(Long zoneId) { List stores = storeMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(zoneId)); if (CollectionUtils.isEmpty(stores)) { - throw new 
CloudRuntimeException("No Secondary storage found in Zone with Id: " + zoneId); + throw new CloudRuntimeException(String.format("No Secondary storage found in Zone: %s", dataCenterDao.findById(zoneId))); } DataStore imageStore = null; for (DataStore store : stores) { @@ -382,7 +382,7 @@ private DataStore getImageStore(Long zoneId) { } } if (imageStore == null) { - throw new CloudRuntimeException("No suitable secondary storage found to retrieve diagnostics in Zone: " + zoneId); + throw new CloudRuntimeException(String.format("No suitable secondary storage found to retrieve diagnostics in Zone: %s", dataCenterDao.findById(zoneId))); } return imageStore; } @@ -418,7 +418,7 @@ private String getVMSshIp(final VMInstanceVO vmInstance) { Map accessDetails = networkManager.getSystemVMAccessDetails(vmInstance); String controlIP = accessDetails.get(NetworkElementCommand.ROUTER_IP); if (StringUtils.isBlank(controlIP)) { - throw new CloudRuntimeException("Unable to find system vm ssh/control IP for vm with ID: " + vmInstance.getId()); + throw new CloudRuntimeException(String.format("Unable to find system vm ssh/control IP for vm: %s", vmInstance)); } return controlIP; } diff --git a/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java b/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java index 3194af03ac45..4ceffc4ba06d 100644 --- a/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java @@ -287,7 +287,7 @@ public void downloadTemplate(long templateId, long poolId, long hostId) { VMTemplateStoragePoolVO sPoolRef = vmTemplatePoolDao.findByPoolTemplate(poolId, templateId, null); if (sPoolRef == null) { if (logger.isDebugEnabled()) { - logger.debug("Not found (templateId:" + templateId + " poolId: " + poolId + ") in template_spool_ref, persisting it"); + logger.debug("Not found 
(template:{} pool: {}) in template_spool_ref, persisting it", template, pool); } DirectDownloadAnswer ans = (DirectDownloadAnswer) answer; sPoolRef = new VMTemplateStoragePoolVO(poolId, templateId, null); @@ -364,14 +364,14 @@ private Answer sendDirectDownloadCommand(DirectDownloadCommand cmd, VMTemplateVO retry --; } if (!downloaded) { - logUsageEvent(template, poolId); + logUsageEvent(template, storagePoolVO); if (!answerDetails.isEmpty()){ Account caller = CallContext.current().getCallingAccount(); if (caller != null && caller.getType() == Account.Type.ADMIN){ errorDetails = String.format(" Details: %s", answerDetails); } } - throw new CloudRuntimeException(String.format("Template %d could not be downloaded on pool %d, failing after trying on several hosts%s", template.getId(), poolId, errorDetails)); + throw new CloudRuntimeException(String.format("Template %s could not be downloaded on pool %s, failing after trying on several hosts%s", template, storagePoolVO, errorDetails)); } return answer; } @@ -379,12 +379,12 @@ private Answer sendDirectDownloadCommand(DirectDownloadCommand cmd, VMTemplateVO /** * Log and persist event for direct download failure */ - private void logUsageEvent(VMTemplateVO template, long poolId) { + private void logUsageEvent(VMTemplateVO template, StoragePoolVO pool) { String event = EventTypes.EVENT_TEMPLATE_DIRECT_DOWNLOAD_FAILURE; if (template.getFormat() == ImageFormat.ISO) { event = EventTypes.EVENT_ISO_DIRECT_DOWNLOAD_FAILURE; } - String description = "Direct Download for template Id: " + template.getId() + " on pool Id: " + poolId + " failed"; + String description = String.format("Direct Download for template: %s on pool: %s failed", template, pool); logger.error(description); ActionEventUtils.onCompletedActionEvent(CallContext.current().getCallingUserId(), template.getAccountId(), EventVO.LEVEL_INFO, event, description, template.getId(), ApiCommandResourceType.Template.toString(), 0); } @@ -498,12 +498,12 @@ public Pair> 
uploadCertif hosts = Collections.singletonList(host); certificateVO = directDownloadCertificateDao.findByAlias(alias, hypervisorType, zoneId); if (certificateVO == null) { - logger.info("Certificate must be uploaded on zone " + zoneId); + logger.info("Certificate must be uploaded on zone {}", () -> dataCenterDao.findById(zoneId)); return new Pair<>(certificateVO, new ArrayList<>()); } } - logger.info("Attempting to upload certificate: " + alias + " to " + hosts.size() + " hosts on zone " + zoneId); + logger.info("Attempting to upload certificate: {} to {} hosts on zone {}", () -> alias, hosts::size, () -> dataCenterDao.findById(zoneId)); int success = 0; int failed = 0; List results = new ArrayList<>(); @@ -513,9 +513,9 @@ public Pair> uploadCertif continue; } HostCertificateStatus hostStatus; - Pair result = provisionCertificate(certificateVO.getId(), host.getId()); + Pair result = provisionCertificate(certificateVO, host); if (!result.first()) { - String msg = "Could not upload certificate " + alias + " on host: " + host.getName() + " (" + host.getUuid() + "): " + result.second(); + String msg = String.format("Could not upload certificate %s on host: %s: %s", alias, host, result.second()); logger.error(msg); failed++; hostStatus = new HostCertificateStatus(CertificateStatus.FAILED, host, result.second()); @@ -530,17 +530,17 @@ public Pair> uploadCertif return new Pair<>(certificateVO, results); } - private Pair setupCertificateOnHost(DirectDownloadCertificate certificate, long hostId) { + private Pair setupCertificateOnHost(DirectDownloadCertificate certificate, Host host) { String certificateStr = certificate.getCertificate(); String alias = certificate.getAlias(); long certificateId = certificate.getId(); - logger.debug("Uploading certificate: " + alias + " to host " + hostId); + logger.debug("Uploading certificate: {} to host {}", alias, host); SetupDirectDownloadCertificateCommand cmd = new SetupDirectDownloadCertificateCommand(certificateStr, alias); - 
Answer answer = agentManager.easySend(hostId, cmd); + Answer answer = agentManager.easySend(host.getId(), cmd); Pair result; if (answer == null || !answer.getResult()) { - String msg = "Certificate " + alias + " could not be added to host " + hostId; + String msg = String.format("Certificate %s could not be added to host %s", alias, host); if (answer != null) { msg += " due to: " + answer.getDetails(); } @@ -550,13 +550,13 @@ private Pair setupCertificateOnHost(DirectDownloadCertificate c result = new Pair<>(true, "OK"); } - logger.info("Certificate " + alias + " successfully uploaded to host: " + hostId); - DirectDownloadCertificateHostMapVO map = directDownloadCertificateHostMapDao.findByCertificateAndHost(certificateId, hostId); + logger.info("Certificate {} successfully uploaded to host: {}", alias, host); + DirectDownloadCertificateHostMapVO map = directDownloadCertificateHostMapDao.findByCertificateAndHost(certificateId, host.getId()); if (map != null) { map.setRevoked(false); directDownloadCertificateHostMapDao.update(map.getId(), map); } else { - DirectDownloadCertificateHostMapVO mapVO = new DirectDownloadCertificateHostMapVO(certificateId, hostId); + DirectDownloadCertificateHostMapVO mapVO = new DirectDownloadCertificateHostMapVO(certificateId, host.getId()); directDownloadCertificateHostMapDao.persist(mapVO); } return result; @@ -565,19 +565,22 @@ private Pair setupCertificateOnHost(DirectDownloadCertificate c * Upload and import certificate to hostId on keystore */ public Pair provisionCertificate(long certificateId, long hostId) { - DirectDownloadCertificateVO certificateVO = directDownloadCertificateDao.findById(certificateId); - if (certificateVO == null) { - throw new CloudRuntimeException("Could not find certificate with id " + certificateId + " to upload to host: " + hostId); - } HostVO host = hostDao.findById(hostId); if (host == null) { throw new CloudRuntimeException("Cannot find a host with ID " + hostId); } + DirectDownloadCertificateVO 
certificateVO = directDownloadCertificateDao.findById(certificateId); + if (certificateVO == null) { + throw new CloudRuntimeException(String.format("Could not find certificate with id %d to upload to host: %s", certificateId, host)); + } + return provisionCertificate(certificateVO, host); + } + + public Pair provisionCertificate(DirectDownloadCertificate certificate, Host host) { if (host.getHypervisorType() != HypervisorType.KVM) { throw new CloudRuntimeException("Cannot provision certificate to host " + host.getName() + " since it is not KVM"); } - - return setupCertificateOnHost(certificateVO, hostId); + return setupCertificateOnHost(certificate, host); } @Override @@ -585,32 +588,33 @@ public boolean syncCertificatesToHost(long hostId, long zoneId) { List zoneCertificates = directDownloadCertificateDao.listByZone(zoneId); if (CollectionUtils.isEmpty(zoneCertificates)) { if (logger.isTraceEnabled()) { - logger.trace("No certificates to sync on host: " + hostId); + logger.trace("No certificates to sync on host: {}", () -> hostDao.findById(hostId)); } return true; } boolean syncCertificatesResult = true; int certificatesSyncCount = 0; - logger.debug("Syncing certificates on host: " + hostId); + HostVO host = hostDao.findById(hostId); + logger.debug("Syncing certificates on host: {}", host); for (DirectDownloadCertificateVO certificateVO : zoneCertificates) { DirectDownloadCertificateHostMapVO mapping = directDownloadCertificateHostMapDao.findByCertificateAndHost(certificateVO.getId(), hostId); if (mapping == null) { - logger.debug("Syncing certificate " + certificateVO.getId() + " (" + certificateVO.getAlias() + ") on host: " + hostId + ", uploading it"); + logger.debug("Syncing certificate {} on host: {}, uploading it", certificateVO, host); Pair result = provisionCertificate(certificateVO.getId(), hostId); if (!result.first()) { - String msg = "Could not sync certificate " + certificateVO.getId() + " (" + certificateVO.getAlias() + ") on host: " + hostId + ", 
upload failed: " + result.second(); + String msg = String.format("Could not sync certificate %s on host: %s, upload failed: %s", certificateVO, host, result.second()); logger.error(msg); syncCertificatesResult = false; } else { certificatesSyncCount++; } } else { - logger.debug("Certificate " + certificateVO.getId() + " (" + certificateVO.getAlias() + ") already synced on host: " + hostId); + logger.debug("Certificate {} already synced on host: {}", certificateVO, host); } } - logger.debug("Synced " + certificatesSyncCount + " out of " + zoneCertificates.size() + " certificates on host: " + hostId); + logger.debug("Synced {} out of {} certificates on host: {}", certificatesSyncCount, zoneCertificates.size(), host); return syncCertificatesResult; } @@ -621,11 +625,11 @@ private List getCertificateHostMappings(Dire } else { DirectDownloadCertificateHostMapVO hostMap = directDownloadCertificateHostMapDao.findByCertificateAndHost(certificate.getId(), hostId); if (hostMap == null) { - String msg = "Certificate " + certificate.getAlias() + " cannot be revoked from host " + hostId + " as it is not available on the host"; + String msg = String.format("Certificate %s cannot be revoked from host %s as it is not available on the host", certificate, hostDao.findById(hostId)); logger.error(msg); throw new CloudRuntimeException(msg); } else if (hostMap.isRevoked()) { - logger.debug("Certificate " + certificate.getAlias() + " was already revoked from host " + hostId + " skipping it"); + logger.debug("Certificate {} was already revoked from host {} skipping it", certificate, hostDao.findById(hostId)); return new LinkedList<>(); } maps = Collections.singletonList(hostMap); @@ -675,21 +679,21 @@ public List revokeCertificate(DirectDownloadCertificate c if (host == null || host.getDataCenterId() != zoneId || host.getHypervisorType() != HypervisorType.KVM) { if (host != null) { String reason = host.getDataCenterId() != zoneId ? 
"Host is not in the zone " + zoneId : "Host hypervisor is not KVM"; - logger.debug("Skipping host " + host.getName() + ": " + reason); + logger.debug("Skipping host {}: {}", host, reason); hostStatus = new HostCertificateStatus(CertificateStatus.SKIPPED, host, reason); hostsList.add(hostStatus); } skipped++; continue; } - Pair result = revokeCertificateAliasFromHost(certificateAlias, mappingHostId); + Pair result = revokeCertificateAliasFromHost(certificateAlias, host); if (!result.first()) { - String msg = "Could not revoke certificate from host: " + mappingHostId + ": " + result.second(); + String msg = String.format("Could not revoke certificate from host: %s: %s", host, result.second()); logger.error(msg); hostStatus = new HostCertificateStatus(CertificateStatus.FAILED, host, result.second()); failed++; } else { - logger.info("Certificate " + certificateAlias + " revoked from host " + mappingHostId); + logger.info("Certificate {} revoked from host {}", certificate, host); map.setRevoked(true); hostStatus = new HostCertificateStatus(CertificateStatus.REVOKED, host, null); success++; @@ -697,8 +701,7 @@ public List revokeCertificate(DirectDownloadCertificate c } hostsList.add(hostStatus); } - logger.info(String.format("Certificate alias %s revoked from: %d hosts, %d failed, %d skipped", - certificateAlias, success, failed, skipped)); + logger.info("Certificate alias {} revoked from: {} hosts, {} failed, {} skipped", certificate, success, failed, skipped); return hostsList; } @@ -726,13 +729,13 @@ public List getCertificateHostsMapping(Long ce return new LinkedList<>(directDownloadCertificateHostMapDao.listByCertificateId(certificateId)); } - protected Pair revokeCertificateAliasFromHost(String alias, Long hostId) { + protected Pair revokeCertificateAliasFromHost(String alias, Host host) { RevokeDirectDownloadCertificateCommand cmd = new RevokeDirectDownloadCertificateCommand(alias); try { - Answer answer = agentManager.send(hostId, cmd); + Answer answer = 
agentManager.send(host.getId(), cmd); return new Pair<>(answer != null && answer.getResult(), answer != null ? answer.getDetails() : ""); } catch (AgentUnavailableException | OperationTimedoutException e) { - logger.error("Error revoking certificate " + alias + " from host " + hostId, e); + logger.error("Error revoking certificate {} from host {}", alias, host, e); return new Pair<>(false, e.getMessage()); } } @@ -813,15 +816,11 @@ protected void runInContext() { for (HostVO hostVO : hostsToUpload) { DirectDownloadCertificateHostMapVO mapping = directDownloadCertificateHostMapDao.findByCertificateAndHost(certificateVO.getId(), hostVO.getId()); if (mapping == null) { - logger.debug("Certificate " + certificateVO.getId() + - " (" + certificateVO.getAlias() + ") was not uploaded to host: " + hostVO.getId() + - " uploading it"); + logger.debug("Certificate {} was not uploaded to host: {} uploading it", certificateVO, hostVO); Pair result = directDownloadManager.provisionCertificate(certificateVO.getId(), hostVO.getId()); - logger.debug("Certificate " + certificateVO.getAlias() + " " + - (result.first() ? "uploaded" : "could not be uploaded") + - " to host " + hostVO.getId()); + logger.debug("Certificate {} {} to host {}", certificateVO, result.first() ? 
"uploaded" : "could not be uploaded", hostVO); if (!result.first()) { - logger.error("Certificate " + certificateVO.getAlias() + " failed: " + result.second()); + logger.error("Certificate {} failed: {}", certificateVO, result.second()); } } } diff --git a/server/src/main/java/org/apache/cloudstack/ha/HAManagerImpl.java b/server/src/main/java/org/apache/cloudstack/ha/HAManagerImpl.java index 2ab252430d5a..aa9727cf33fb 100644 --- a/server/src/main/java/org/apache/cloudstack/ha/HAManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/ha/HAManagerImpl.java @@ -294,7 +294,7 @@ public void validateHAProviderConfigForResource(final Long resourceId, final HAR } if (!host.getHypervisorType().toString().equals(haProvider.resourceSubType().toString())) { - throw new ServerApiException(ApiErrorCode.PARAM_ERROR, String.format("Incompatible haprovider provided [%s] for the resource [%s] of hypervisor type: [%s].", haProvider.resourceSubType().toString(), host.getId(),host.getHypervisorType())); + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, String.format("Incompatible haprovider provided [%s] for the resource [%s] of hypervisor type: [%s].", haProvider.resourceSubType().toString(), host.getUuid(), host.getHypervisorType())); } } } @@ -307,10 +307,10 @@ public Boolean isVMAliveOnHost(final Host host) throws Investigator.UnknownVM { final HAConfig haConfig = haConfigDao.findHAResource(host.getId(), HAResource.ResourceType.Host); if (haConfig != null) { if (haConfig.getState() == HAConfig.HAState.Fenced) { - logger.debug(String.format("HA: Host [%s] is fenced.", host.getId())); + logger.debug("HA: Host [{}] is fenced.", host); return false; } - logger.debug(String.format("HA: Host [%s] is alive.", host.getId())); + logger.debug("HA: Host [{}] is alive.", host); return true; } throw new Investigator.UnknownVM(); @@ -320,10 +320,10 @@ public Status getHostStatus(final Host host) { final HAConfig haConfig = haConfigDao.findHAResource(host.getId(), 
HAResource.ResourceType.Host); if (haConfig != null) { if (haConfig.getState() == HAConfig.HAState.Fenced) { - logger.debug(String.format("HA: Agent [%s] is available/suspect/checking Up.", host.getId())); + logger.debug("HA: Agent [{}] is available/suspect/checking Up.", host); return Status.Down; } else if (haConfig.getState() == HAConfig.HAState.Degraded || haConfig.getState() == HAConfig.HAState.Recovering || haConfig.getState() == HAConfig.HAState.Fencing) { - logger.debug(String.format("HA: Agent [%s] is disconnected. State: %s, %s.", host.getId(), haConfig.getState(), haConfig.getState().getDescription())); + logger.debug("HA: Agent [{}] is disconnected. State: {}, {}.", host, haConfig.getState(), haConfig.getState().getDescription()); return Status.Disconnected; } return Status.Up; diff --git a/server/src/main/java/org/apache/cloudstack/ha/provider/host/HAAbstractHostProvider.java b/server/src/main/java/org/apache/cloudstack/ha/provider/host/HAAbstractHostProvider.java index af76d2d4ae71..81a85b03cf09 100644 --- a/server/src/main/java/org/apache/cloudstack/ha/provider/host/HAAbstractHostProvider.java +++ b/server/src/main/java/org/apache/cloudstack/ha/provider/host/HAAbstractHostProvider.java @@ -72,7 +72,7 @@ public boolean isInMaintenanceMode(final Host host) { public void fenceSubResources(final Host r) { if (r.getState() != Status.Down) { try { - logger.debug("Trying to disconnect the host without investigation and scheduling HA for the VMs on host id=" + r.getId()); + logger.debug("Trying to disconnect the host without investigation and scheduling HA for the VMs on host {}", r); agentManager.disconnectWithoutInvestigation(r.getId(), Event.HostDown); oldHighAvailabilityManager.scheduleRestartForVmsOnHost((HostVO)r, true); } catch (Exception e) { diff --git a/server/src/main/java/org/apache/cloudstack/ha/task/BaseHATask.java b/server/src/main/java/org/apache/cloudstack/ha/task/BaseHATask.java index 9cc65e796a84..7441d480628f 100644 --- 
a/server/src/main/java/org/apache/cloudstack/ha/task/BaseHATask.java +++ b/server/src/main/java/org/apache/cloudstack/ha/task/BaseHATask.java @@ -100,7 +100,7 @@ public Boolean call() throws HACheckerException, HAFenceException, HARecoveryExc logger.warn("Exception occurred while running " + getTaskType() + " on a resource: " + e.getMessage(), e.getCause()); throwable = e.getCause(); } catch (TimeoutException e) { - logger.trace(getTaskType() + " operation timed out for resource id:" + resource.getId()); + logger.trace("{} operation timed out for resource id:{}", getTaskType(), resource); } processResult(result, throwable); return result; diff --git a/server/src/main/java/org/apache/cloudstack/network/lb/ApplicationLoadBalancerManagerImpl.java b/server/src/main/java/org/apache/cloudstack/network/lb/ApplicationLoadBalancerManagerImpl.java index f05e216f1eb5..bbfa83dcf434 100644 --- a/server/src/main/java/org/apache/cloudstack/network/lb/ApplicationLoadBalancerManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/network/lb/ApplicationLoadBalancerManagerImpl.java @@ -180,8 +180,7 @@ public ApplicationLoadBalancerRuleVO doInTransaction(TransactionStatus status) t if (!_firewallDao.setStateToAdd(newRule)) { throw new CloudRuntimeException("Unable to update the state to add for " + newRule); } - logger.debug("Load balancer " + newRule.getId() + " for Ip address " + newRule.getSourceIp().addr() + ", source port " + - newRule.getSourcePortStart().intValue() + ", instance port " + newRule.getDefaultPortStart() + " is added successfully."); + logger.debug("Load balancer rule {} for Ip address {}, source port {}, instance port {} is added successfully.", newRule, newRule.getSourceIp().addr(), newRule.getSourcePortStart(), newRule.getDefaultPortStart()); CallContext.current().setEventDetails("Load balancer Id: " + newRule.getId()); Network ntwk = _networkModel.getNetwork(newRule.getNetworkId()); 
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_LOAD_BALANCER_CREATE, newRule.getAccountId(), ntwk.getDataCenterId(), newRule.getId(), null, @@ -524,7 +523,7 @@ protected void detectInternalLbRulesConflict(ApplicationLoadBalancerRule newLbRu .intValue())) { throw new NetworkRuleConflictException("The range specified, " + newLbRule.getSourcePortStart().intValue() + "-" + newLbRule.getSourcePortEnd().intValue() + - ", conflicts with rule " + lbRule.getId() + " which has " + lbRule.getSourcePortStart().intValue() + "-" + lbRule.getSourcePortEnd().intValue()); + ", conflicts with rule " + lbRule + " which has " + lbRule.getSourcePortStart().intValue() + "-" + lbRule.getSourcePortEnd().intValue()); } } diff --git a/server/src/main/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinition.java b/server/src/main/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinition.java index 817cfe07e58e..debe9eee6da7 100644 --- a/server/src/main/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinition.java +++ b/server/src/main/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinition.java @@ -26,6 +26,7 @@ import com.cloud.network.dao.NetworkDetailVO; import com.cloud.network.dao.NetworkDetailsDao; import com.cloud.network.dao.NsxProviderDao; +import com.cloud.network.dao.PhysicalNetworkDao; import com.cloud.network.element.NsxProviderVO; import com.cloud.network.router.VirtualRouter; import com.cloud.storage.DiskOfferingVO; @@ -94,6 +95,7 @@ public class RouterDeploymentDefinition { protected DomainRouterDao routerDao; protected NsxProviderDao nsxProviderDao; protected PhysicalNetworkServiceProviderDao physicalProviderDao; + protected PhysicalNetworkDao pNtwkDao; protected NetworkModel networkModel; protected VirtualRouterProviderDao vrProviderDao; protected NetworkOfferingDao networkOfferingDao; @@ -257,7 +259,7 @@ protected void findOrDeployVirtualRouter() throws 
ConcurrentOperationException, protected void lock() { final Network lock = networkDao.acquireInLockTable(guestNetwork.getId(), NetworkOrchestrationService.NetworkLockTimeout.value()); if (lock == null) { - throw new ConcurrentOperationException("Unable to lock network " + guestNetwork.getId()); + throw new ConcurrentOperationException(String.format("Unable to lock network %s", guestNetwork)); } tableLockId = lock.getId(); } @@ -266,7 +268,7 @@ protected void unlock() { if (tableLockId != null) { networkDao.releaseFromLockTable(tableLockId); if (logger.isDebugEnabled()) { - logger.debug("Lock is released for network id " + tableLockId + " as a part of router startup in " + dest); + logger.debug(String.format("Lock is released for network [id: %d] (%s) as a part of router startup in %s", tableLockId, guestNetwork, dest)); } } } @@ -309,7 +311,7 @@ protected List findDestinations() { // If List size is one, we already have a starting or running VR, skip deployment if (virtualRouters.size() == 1) { - logger.debug("Skipping VR deployment: Found a running or starting VR in Pod " + pod.getName() + " id=" + podId); + logger.debug(String.format("Skipping VR deployment: Found a running or starting VR in Pod %s", pod)); continue; } // Add new DeployDestination for this pod @@ -429,7 +431,7 @@ private void verifyServiceOfferingByUuid(String offeringUuid) { DiskOfferingVO diskOffering = diskOfferingDao.findById(serviceOffering.getDiskOfferingId()); boolean isLocalStorage = ConfigurationManagerImpl.SystemVMUseLocalStorage.valueIn(dest.getDataCenter().getId()); if (isLocalStorage == diskOffering.isUseLocalStorage()) { - logger.debug(String.format("Service offering %s (uuid: %s) will be used on virtual router", serviceOffering.getName(), serviceOffering.getUuid())); + logger.debug(String.format("Service offering %s will be used on virtual router", serviceOffering)); serviceOfferingId = serviceOffering.getId(); } } @@ -452,7 +454,7 @@ protected void findVirtualProvider() { final 
PhysicalNetworkServiceProvider provider = physicalProviderDao.findByServiceProvider(physicalNetworkId, type.toString()); if (provider == null) { - throw new CloudRuntimeException(String.format("Cannot find service provider %s in physical network %s", type.toString(), physicalNetworkId)); + throw new CloudRuntimeException(String.format("Cannot find service provider %s in physical network %s", type.toString(), pNtwkDao.findById(physicalNetworkId))); } vrProvider = vrProviderDao.findByNspIdAndType(provider.getId(), type); diff --git a/server/src/main/java/org/apache/cloudstack/network/router/deployment/VpcRouterDeploymentDefinition.java b/server/src/main/java/org/apache/cloudstack/network/router/deployment/VpcRouterDeploymentDefinition.java index 405575c65b11..b9ff50f369d8 100644 --- a/server/src/main/java/org/apache/cloudstack/network/router/deployment/VpcRouterDeploymentDefinition.java +++ b/server/src/main/java/org/apache/cloudstack/network/router/deployment/VpcRouterDeploymentDefinition.java @@ -36,7 +36,6 @@ import com.cloud.network.PhysicalNetwork; import com.cloud.network.PhysicalNetworkServiceProvider; import com.cloud.network.VirtualRouterProvider.Type; -import com.cloud.network.dao.PhysicalNetworkDao; import com.cloud.network.vpc.Vpc; import com.cloud.network.vpc.VpcManager; import com.cloud.network.vpc.dao.VpcDao; @@ -50,7 +49,6 @@ public class VpcRouterDeploymentDefinition extends RouterDeploymentDefinition { protected VpcDao vpcDao; protected VpcOfferingDao vpcOffDao; - protected PhysicalNetworkDao pNtwkDao; protected VpcManager vpcMgr; protected VlanDao vlanDao; @@ -78,7 +76,7 @@ public boolean isVpcRouter() { protected void lock() { final Vpc vpcLock = vpcDao.acquireInLockTable(vpc.getId()); if (vpcLock == null) { - throw new ConcurrentOperationException("Unable to lock vpc " + vpc.getId()); + throw new ConcurrentOperationException(String.format("Unable to lock vpc %s", vpc)); } tableLockId = vpcLock.getId(); } @@ -88,7 +86,7 @@ protected void unlock() 
{ if (tableLockId != null) { vpcDao.releaseFromLockTable(tableLockId); if (logger.isDebugEnabled()) { - logger.debug("Lock is released for vpc id " + tableLockId + " as a part of router startup in " + dest); + logger.debug(String.format("Lock is released for vpc [id: %d] (%s) as a part of router startup in %s", tableLockId, vpc, dest)); } } } @@ -166,7 +164,7 @@ protected void findVirtualProvider() { for (final PhysicalNetwork pNtwk : pNtwks) { final PhysicalNetworkServiceProvider provider = physicalProviderDao.findByServiceProvider(pNtwk.getId(), Type.VPCVirtualRouter.toString()); if (provider == null) { - throw new CloudRuntimeException("Cannot find service provider " + Type.VPCVirtualRouter.toString() + " in physical network " + pNtwk.getId()); + throw new CloudRuntimeException(String.format("Cannot find service provider %s in physical network %s", Type.VPCVirtualRouter.toString(), pNtwk)); } vrProvider = vrProviderDao.findByNspIdAndType(provider.getId(), Type.VPCVirtualRouter); if (vrProvider != null) { diff --git a/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkTopology.java b/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkTopology.java index 5c1fc5e9ac6e..936d9cfb3d61 100644 --- a/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkTopology.java +++ b/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkTopology.java @@ -107,7 +107,7 @@ public boolean applyStaticRoutes(final List staticRoutes, fi result = result && routesRules.accept(_advancedVisitor, router); } else if (router.getState() == State.Stopped || router.getState() == State.Stopping) { - logger.debug("Router " + router.getInstanceName() + " is in " + router.getState() + ", so not sending StaticRoute command to the backend"); + logger.debug("Router {} is in {}, so not sending StaticRoute command to the backend", router, router.getState()); } else { logger.warn("Unable to apply StaticRoute, virtual 
router is not in the right state " + router.getState()); @@ -193,7 +193,7 @@ public boolean associatePublicIP(final Network network, final List bpgPeers, if (router.getState() == State.Running) { result = bgpPeersRules.accept(_advancedVisitor, router); } else if (router.getState() == State.Stopped || router.getState() == State.Stopping) { - logger.debug("Router " + router.getInstanceName() + " is in " + router.getState() + ", so not sending BgpPeer command to the backend"); + logger.debug("Router {} is in {}, so not sending BgpPeer command to the backend", router, router.getState()); } else { logger.warn("Unable to apply BgpPeer, virtual router is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to apply BgpPeer on the backend," + " virtual router is not in the right state", DataCenter.class, diff --git a/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkTopology.java b/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkTopology.java index 65d702b71380..a7000f702ec9 100644 --- a/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkTopology.java +++ b/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkTopology.java @@ -192,7 +192,7 @@ public boolean applyLoadBalancingRules(final Network network, final List rules, final VirtualRouter router) throws ResourceUnavailableException { if (rules == null || rules.isEmpty()) { - logger.debug("No firewall rules to be applied for network " + network.getId()); + logger.debug("No firewall rules to be applied for network {}", network); return true; } @@ -231,7 +231,7 @@ public boolean applyFirewallRules(final Network network, final List rules, final VirtualRouter router) throws ResourceUnavailableException { if (rules == null || rules.isEmpty()) { - logger.debug("No static nat rules to be applied for network " + network.getId()); + logger.debug("No static nat rules to be applied for network {}", network); 
return true; } @@ -251,7 +251,7 @@ public boolean applyStaticNats(final Network network, final List ipAddress, final VirtualRouter router) throws ResourceUnavailableException { if (ipAddress == null || ipAddress.isEmpty()) { - logger.debug("No ip association rules to be applied for network " + network.getId()); + logger.debug("No ip association rules to be applied for network {}", network); return true; } @@ -271,7 +271,7 @@ public boolean associatePublicIP(final Network network, final List users, final List routers) throws ResourceUnavailableException { if (routers == null || routers.isEmpty()) { logger.warn("Failed to add/remove VPN users: no router found for account and zone"); - throw new ResourceUnavailableException("Unable to assign ip addresses, domR doesn't exist for network " + network.getId(), DataCenter.class, network.getDataCenterId()); + throw new ResourceUnavailableException(String.format("Unable to assign ip addresses, domR doesn't exist for network %s", network), DataCenter.class, network.getDataCenterId()); } logger.debug("APPLYING BASIC VPN RULES"); @@ -281,7 +281,7 @@ public String[] applyVpnUsers(final Network network, final List ruleApplierWrapper) throws ResourceUnavailableException { if (router == null) { - logger.warn("Unable to apply " + typeString + ", virtual router doesn't exist in the network " + network.getId()); + logger.warn("Unable to apply {}, virtual router doesn't exist in the network {}", typeString, network); throw new ResourceUnavailableException("Unable to apply " + typeString, DataCenter.class, network.getDataCenterId()); } @@ -383,7 +383,7 @@ public boolean applyRules(final Network network, final VirtualRouter router, fin throw new ResourceUnavailableException("Unable to process due to the stop pending router " + router.getInstanceName() + " haven't been stopped after it's host coming back!", DataCenter.class, router.getDataCenterId()); } - logger.debug("Router " + router.getInstanceName() + " is stop pending, so not 
sending apply " + typeString + " commands to the backend"); + logger.debug("Router {} is stop pending, so not sending apply {} commands to the backend", router, typeString); return false; } @@ -391,7 +391,7 @@ public boolean applyRules(final Network network, final VirtualRouter router, fin result = ruleApplier.accept(getVisitor(), router); connectedRouters.add(router); } catch (final AgentUnavailableException e) { - logger.warn(msg + router.getInstanceName(), e); + logger.warn("{}{}", msg, router, e); disconnectedRouters.add(router); } @@ -405,7 +405,7 @@ public boolean applyRules(final Network network, final VirtualRouter router, fin } } else if (router.getState() == State.Stopped || router.getState() == State.Stopping) { - logger.debug("Router " + router.getInstanceName() + " is in " + router.getState() + ", so not sending apply " + typeString + " commands to the backend"); + logger.debug("Router {} is in {}, so not sending apply {} commands to the backend", router, router.getState(), typeString); } else { logger.warn("Unable to apply " + typeString + ", virtual router is not in the right state " + router.getState()); if (isZoneBasic && isPodLevelException) { @@ -429,7 +429,7 @@ public boolean applyRules(final Network network, final VirtualRouter router, fin } } else if (!disconnectedRouters.isEmpty()) { if (logger.isDebugEnabled()) { - logger.debug(msg + router.getInstanceName() + "(" + router.getId() + ")"); + logger.debug("{}{}", msg, router); } if (isZoneBasic && isPodLevelException) { throw new ResourceUnavailableException(msg, Pod.class, podId); diff --git a/server/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementServiceImpl.java b/server/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementServiceImpl.java index 02600b87f290..e4481dab548c 100644 --- a/server/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementServiceImpl.java +++ 
b/server/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementServiceImpl.java @@ -29,6 +29,8 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.api.response.OutOfBandManagementResponse; import org.apache.cloudstack.context.CallContext; @@ -73,9 +75,13 @@ @Component public class OutOfBandManagementServiceImpl extends ManagerBase implements OutOfBandManagementService, Manager, Configurable { + @Inject + private ClusterDao clusterDao; @Inject private ClusterDetailsDao clusterDetailsDao; @Inject + private DataCenterDao dataCenterDao; + @Inject private DataCenterDetailsDao dataCenterDetailsDao; @Inject private OutOfBandManagementDao outOfBandManagementDao; @@ -191,8 +197,8 @@ private void sendAuthError(final Host host, final String message) { if (sentCount != null && sentCount <= 0) { boolean concurrentUpdateResult = hostAlertCache.asMap().replace(host.getId(), sentCount, sentCount+1L); if (concurrentUpdateResult) { - final String subject = String.format("Out-of-band management auth-error detected for %s in cluster [id: %d] and zone [id: %d].", host, host.getClusterId(), host.getDataCenterId()); - logger.error(subject + ": " + message); + final String subject = String.format("Out-of-band management auth-error detected for %s in cluster [%s] and zone [%s].", host, clusterDao.findById(host.getClusterId()), dataCenterDao.findById(host.getDataCenterId())); + logger.error("{}: {}", subject, message); alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_OOBM_AUTH_ERROR, host.getDataCenterId(), host.getPodId(), subject, message); } } @@ -254,7 +260,7 @@ private boolean isOutOfBandManagementEnabledForHost(Long hostId) { Host host = hostDao.findById(hostId); if (host == null || host.getResourceState() == ResourceState.Degraded) { String state = host != null ? 
String.valueOf(host.getResourceState()) : null; - logger.debug(String.format("Host [id=%s, state=%s] was removed or placed in Degraded state by the Admin.", hostId, state)); + logger.debug("Host [id={}, uuid={}, state={}] was removed or placed in Degraded state by the Admin.", hostId, host != null ? host.getUuid() : "", state); return false; } @@ -474,7 +480,7 @@ public Boolean doInTransaction(TransactionStatus status) { try { driverResponse = driver.execute(changePasswordCmd); } catch (Exception e) { - logger.error("Out-of-band management change password failed due to driver error: " + e.getMessage()); + logger.error("Out-of-band management change password for {} failed due to driver error: {}", host, e.getMessage()); throw new CloudRuntimeException(String.format("Failed to change out-of-band management password for %s due to driver error: %s", host, e.getMessage())); } diff --git a/server/src/main/java/org/apache/cloudstack/outofbandmanagement/PowerOperationTask.java b/server/src/main/java/org/apache/cloudstack/outofbandmanagement/PowerOperationTask.java index 487a11c97526..7f7e833e6570 100644 --- a/server/src/main/java/org/apache/cloudstack/outofbandmanagement/PowerOperationTask.java +++ b/server/src/main/java/org/apache/cloudstack/outofbandmanagement/PowerOperationTask.java @@ -50,8 +50,7 @@ public void run() { try { service.executePowerOperation(host, powerOperation, null); } catch (Exception e) { - logger.warn(String.format("Out-of-band management background task operation=%s for host %s failed with: %s", - powerOperation.name(), host.getName(), e.getMessage())); + logger.warn("Out-of-band management background task operation={} for host {} failed with: {}", powerOperation.name(), host, e.getMessage()); String eventMessage = String .format("Error while issuing out-of-band management action %s for host: %s", powerOperation.name(), host.getName()); diff --git a/server/src/main/java/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java 
b/server/src/main/java/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java index 3680c869eb19..e858e7efe4dc 100644 --- a/server/src/main/java/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java @@ -24,6 +24,7 @@ import javax.inject.Inject; +import com.cloud.dc.dao.DataCenterDao; import org.apache.cloudstack.acl.SecurityChecker; import org.apache.cloudstack.api.command.user.region.ha.gslb.AssignToGlobalLoadBalancerRuleCmd; import org.apache.cloudstack.api.command.user.region.ha.gslb.CreateGlobalLoadBalancerRuleCmd; @@ -79,6 +80,8 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR @Inject GlobalLoadBalancerLbRuleMapDao _gslbLbMapDao; @Inject + DataCenterDao zoneDao; + @Inject RegionDao _regionDao; @Inject RulesManager _rulesMgr; @@ -160,7 +163,7 @@ public GlobalLoadBalancerRuleVO doInTransaction(TransactionStatus status) { } }); - logger.debug("successfully created new global load balancer rule for the account " + gslbOwner.getId()); + logger.debug("successfully created new global load balancer rule for the account {}", gslbOwner); return newGslbRule; } @@ -284,7 +287,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { // apply the gslb rule on to the back end gslb service providers on zones participating in gslb if (!applyGlobalLoadBalancerRuleConfig(gslbRuleId, false)) { - logger.warn("Failed to add load balancer rules " + newLbRuleIds + " to global load balancer rule id " + gslbRuleId); + logger.warn("Failed to add load balancer rules {} to global load balancer rule id {}", newLbRuleIds, gslbRule); CloudRuntimeException ex = new CloudRuntimeException("Failed to add load balancer rules to GSLB rule "); throw ex; } @@ -387,7 +390,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { // apply the gslb rule on to the back end gslb service providers 
if (!applyGlobalLoadBalancerRuleConfig(gslbRuleId, false)) { - logger.warn("Failed to remove load balancer rules " + lbRuleIdsToremove + " from global load balancer rule id " + gslbRuleId); + logger.warn("Failed to remove load balancer rules {} from global load balancer rule id {}", lbRuleIdsToremove, gslbRule); CloudRuntimeException ex = new CloudRuntimeException("Failed to remove load balancer rule ids from GSLB rule "); throw ex; } @@ -447,7 +450,7 @@ private void revokeGslbRule(final long gslbRuleId, Account caller) { if (gslbRule.getState() == com.cloud.region.ha.GlobalLoadBalancerRule.State.Staged) { if (logger.isDebugEnabled()) { - logger.debug("Rule Id: " + gslbRuleId + " is still in Staged state so just removing it."); + logger.debug("Rule: {} is still in Staged state so just removing it.", gslbRule); } _gslbRuleDao.remove(gslbRuleId); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_GLOBAL_LOAD_BALANCER_DELETE, gslbRule.getAccountId(), 0, gslbRule.getId(), gslbRule.getName(), @@ -542,7 +545,7 @@ public GlobalLoadBalancerRule updateGlobalLoadBalancerRule(UpdateGlobalLoadBalan _gslbRuleDao.update(gslbRule.getId(), gslbRule); try { - logger.debug("Updating global load balancer with id " + gslbRule.getUuid()); + logger.debug("Updating global load balancer with id {}", gslbRule); // apply the gslb rule on to the back end gslb service providers on zones participating in gslb applyGlobalLoadBalancerRuleConfig(gslbRuleId, false); @@ -697,14 +700,14 @@ private boolean applyGlobalLoadBalancerRuleConfig(long gslbRuleId, boolean revok } @Override - public boolean revokeAllGslbRulesForAccount(com.cloud.user.Account caller, long accountId) throws com.cloud.exception.ResourceUnavailableException { - List gslbRules = _gslbRuleDao.listByAccount(accountId); + public boolean revokeAllGslbRulesForAccount(com.cloud.user.Account caller, Account account) throws com.cloud.exception.ResourceUnavailableException { + List gslbRules = _gslbRuleDao.listByAccount(account.getId()); 
if (gslbRules != null && !gslbRules.isEmpty()) { for (GlobalLoadBalancerRule gslbRule : gslbRules) { revokeGslbRule(gslbRule.getId(), caller); } } - logger.debug("Successfully cleaned up GSLB rules for account id=" + accountId); + logger.debug("Successfully cleaned up GSLB rules for account {}", account); return true; } diff --git a/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java b/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java index 64d9b3467e3f..70808458b3ca 100644 --- a/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java +++ b/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java @@ -92,30 +92,29 @@ public class SnapshotHelper { * @param snapInfo the snapshot info to delete. */ public void expungeTemporarySnapshot(boolean kvmSnapshotOnlyInPrimaryStorage, SnapshotInfo snapInfo) { - if (!kvmSnapshotOnlyInPrimaryStorage) { - if (snapInfo != null) { - logger.trace(String.format("Snapshot [%s] is not a temporary backup to create a volume from snapshot. Not expunging it.", snapInfo.getId())); - } + if (snapInfo == null) { + logger.warn("Unable to expunge snapshot due to its info is null."); return; } - if (snapInfo == null) { - logger.warn("Unable to expunge snapshot due to its info is null."); + if (!kvmSnapshotOnlyInPrimaryStorage) { + logger.trace("Snapshot [{}] is not a temporary backup to create a volume from snapshot. Not expunging it.", snapInfo.getSnapshotVO()); return; } if (!DataStoreRole.Image.equals(snapInfo.getDataStore().getRole())) { - logger.debug(String.format("Expunge template for Snapshot [%s] is called for primary storage role. Not expunging it, " + - "but we will still expunge the database reference of the snapshot for image storage role if any", snapInfo.getId())); + logger.debug("Expunge template for Snapshot [{}] is called for primary storage role. 
Not expunging it, " + + "but we will still expunge the database reference of the snapshot for image storage role if any", snapInfo.getSnapshotVO()); } else { - logger.debug(String.format("Expunging snapshot [%s] due to it is a temporary backup to create a volume from snapshot. It is occurring because the global setting [%s]" - + " has the value [%s].", snapInfo.getId(), SnapshotInfo.BackupSnapshotAfterTakingSnapshot.key(), backupSnapshotAfterTakingSnapshot)); + logger.debug("Expunging snapshot [{}] due to it is a temporary backup to create a volume from snapshot." + + " It is occurring because the global setting [{}] has the value [{}].", + snapInfo.getSnapshotVO(), SnapshotInfo.BackupSnapshotAfterTakingSnapshot.key(), backupSnapshotAfterTakingSnapshot); try { snapshotService.deleteSnapshot(snapInfo); } catch (CloudRuntimeException ex) { - logger.warn(String.format("Unable to delete the temporary snapshot [%s] on secondary storage due to [%s]. We still will expunge the database reference, consider" - + " manually deleting the file [%s].", snapInfo.getId(), ex.getMessage(), snapInfo.getPath()), ex); + logger.warn("Unable to delete the temporary snapshot [{}] on secondary storage due to [{}]. We still will expunge the database reference, consider" + + " manually deleting the file [{}].", snapInfo, ex.getMessage(), snapInfo.getPath(), ex); } } @@ -136,7 +135,7 @@ public void expungeTemporarySnapshot(boolean kvmSnapshotOnlyInPrimaryStorage, Sn public SnapshotInfo backupSnapshotToSecondaryStorageIfNotExists(SnapshotInfo snapInfo, DataStoreRole dataStoreRole, Snapshot snapshot, boolean kvmSnapshotOnlyInPrimaryStorage) throws CloudRuntimeException { if (!isSnapshotBackupable(snapInfo, dataStoreRole, kvmSnapshotOnlyInPrimaryStorage)) { logger.trace(String.format("Snapshot [%s] is already on secondary storage or is not a KVM snapshot that is only kept in primary storage. Therefore, we do not back it up." 
- + " up.", snapInfo.getId())); + + " up.", snapInfo.getSnapshotVO())); return snapInfo; } diff --git a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java index bd3a5655e914..644ed5eae132 100644 --- a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java @@ -426,7 +426,7 @@ private List getAdditionalNameFilters(Cluster cluster) { } } } catch (Exception e) { - logger.warn(String.format("Unable to find volume file name for volume ID: %s while adding filters unmanaged VMs", volumeVO.getUuid()), e); + logger.warn("Unable to find volume file name for volume: {} while adding filters unmanaged VMs", volumeVO, e); } if (!volumeFileNames.isEmpty()) { additionalNameFilter.addAll(volumeFileNames); @@ -488,7 +488,7 @@ private ServiceOfferingVO getUnmanagedInstanceServiceOffering(final UnmanagedIns try { cpuSpeed = Integer.parseInt(details.get(VmDetailConstants.CPU_SPEED)); } catch (Exception e) { - logger.error(String.format("Failed to get CPU speed for importing VM [%s] due to [%s].", instance.getName(), e.getMessage()), e); + logger.error("Failed to get CPU speed for importing VM [{}] due to [{}].", instance, e.getMessage(), e); } } Map parameters = new HashMap<>(); @@ -887,7 +887,7 @@ private void cleanupFailedImportVM(final UserVm userVm) { try { networkOrchestrationService.release(profile, true); } catch (Exception e) { - logger.error(String.format("Unable to release NICs for unsuccessful import unmanaged VM: %s", userVm.getInstanceName()), e); + logger.error("Unable to release NICs for unsuccessful import unmanaged VM: {}", userVm, e); nicDao.removeNicsForInstance(userVm.getId()); } // Remove vm @@ -901,12 +901,12 @@ private UserVm migrateImportedVM(HostVO sourceHost, VirtualMachineTemplate templ throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, 
String.format("Failed to check migrations need during VM import")); } if (sourceHost == null || serviceOffering == null || diskProfileStoragePoolList == null) { - logger.error(String.format("Failed to check migrations need during import, VM: %s", userVm.getInstanceName())); + logger.error(String.format("Failed to check migrations need during import, VM: %s", userVm)); cleanupFailedImportVM(vm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to check migrations need during import, VM: %s", userVm.getInstanceName())); } if (!hostSupportsServiceOfferingAndTemplate(sourceHost, serviceOffering, template)) { - logger.debug(String.format("VM %s needs to be migrated", vm.getUuid())); + logger.debug("VM {} needs to be migrated", vm); final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm, template, serviceOffering, owner, null); profile.setServiceOffering(serviceOfferingDao.findById(vm.getId(), serviceOffering.getId())); DeploymentPlanner.ExcludeList excludeList = new DeploymentPlanner.ExcludeList(); @@ -916,7 +916,7 @@ private UserVm migrateImportedVM(HostVO sourceHost, VirtualMachineTemplate templ try { dest = deploymentPlanningManager.planDeployment(profile, plan, excludeList, null); } catch (Exception e) { - String errorMsg = String.format("VM import failed for Unmanaged VM [%s] during VM migration, cannot find deployment destination due to [%s].", vm.getInstanceName(), e.getMessage()); + String errorMsg = String.format("VM import failed for Unmanaged VM [%s] during VM migration, cannot find deployment destination due to [%s].", vm, e.getMessage()); logger.warn(errorMsg, e); cleanupFailedImportVM(vm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMsg); @@ -936,7 +936,7 @@ private UserVm migrateImportedVM(HostVO sourceHost, VirtualMachineTemplate templ } vm = userVmManager.getUserVm(vm.getId()); } catch (Exception e) { - String errorMsg = String.format("VM import failed for Unmanaged VM [%s] during VM 
migration due to [%s].", vm.getInstanceName(), e.getMessage()); + String errorMsg = String.format("VM import failed for Unmanaged VM [%s] during VM migration due to [%s].", vm, e.getMessage()); logger.error(errorMsg, e); cleanupFailedImportVM(vm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMsg); @@ -961,7 +961,7 @@ private UserVm migrateImportedVM(HostVO sourceHost, VirtualMachineTemplate templ if (poolSupportsOfferings) { continue; } - logger.debug(String.format("Volume %s needs to be migrated", volumeVO.getUuid())); + logger.debug("Volume {} needs to be migrated", volumeVO); Pair, List> poolsPair = managementService.listStoragePoolsForSystemMigrationOfVolume(profile.getVolumeId(), null, null, null, null, false, true); if (CollectionUtils.isEmpty(poolsPair.first()) && CollectionUtils.isEmpty(poolsPair.second())) { cleanupFailedImportVM(vm); @@ -995,7 +995,7 @@ private UserVm migrateImportedVM(HostVO sourceHost, VirtualMachineTemplate templ cleanupFailedImportVM(vm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed for unmanaged vm: %s during volume ID: %s migration as no suitable pool found", userVm.getInstanceName(), volumeVO.getUuid())); } else { - logger.debug(String.format("Found storage pool %s(%s) for migrating the volume %s to", storagePool.getName(), storagePool.getUuid(), volumeVO.getUuid())); + logger.debug("Found storage pool {} for migrating the volume {} to", storagePool, volumeVO); } try { Volume volume = null; @@ -1007,15 +1007,15 @@ private UserVm migrateImportedVM(HostVO sourceHost, VirtualMachineTemplate templ if (volume == null) { String msg = ""; if (vm.getState().equals(VirtualMachine.State.Running)) { - msg = String.format("Live migration for volume ID: %s to destination pool ID: %s failed", volumeVO.getUuid(), storagePool.getUuid()); + msg = String.format("Live migration for volume: %s to destination pool: %s failed", volumeVO, storagePool); } else { - msg = 
String.format("Migration for volume ID: %s to destination pool ID: %s failed", volumeVO.getUuid(), storagePool.getUuid()); + msg = String.format("Migration for volume: %s to destination pool: %s failed", volumeVO, storagePool); } logger.error(msg); throw new CloudRuntimeException(msg); } } catch (Exception e) { - logger.error(String.format("VM import failed for unmanaged vm: %s during volume migration", vm.getInstanceName()), e); + logger.error("VM import failed for unmanaged vm: {} during volume migration", vm, e); cleanupFailedImportVM(vm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed for unmanaged vm: %s during volume migration. %s", userVm.getInstanceName(), StringUtils.defaultString(e.getMessage()))); } @@ -1025,7 +1025,7 @@ private UserVm migrateImportedVM(HostVO sourceHost, VirtualMachineTemplate templ private void publishVMUsageUpdateResourceCount(final UserVm userVm, ServiceOfferingVO serviceOfferingVO, VirtualMachineTemplate templateVO) { if (userVm == null || serviceOfferingVO == null) { - logger.error(String.format("Failed to publish usage records during VM import because VM [%s] or ServiceOffering [%s] is null.", userVm, serviceOfferingVO)); + logger.error("Failed to publish usage records during VM import because VM [{}] or ServiceOffering [{}] is null.", userVm, serviceOfferingVO); cleanupFailedImportVM(userVm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "VM import failed for Unmanaged VM during publishing Usage Records."); } @@ -1042,7 +1042,7 @@ private void publishVMUsageUpdateResourceCount(final UserVm userVm, ServiceOffer userVm.getHypervisorType().toString(), VirtualMachine.class.getName(), userVm.getUuid(), userVm.isDisplayVm()); } } catch (Exception e) { - logger.error(String.format("Failed to publish usage records during VM import for unmanaged VM [%s] due to [%s].", userVm.getInstanceName(), e.getMessage()), e); + logger.error("Failed to publish usage records during VM import for 
unmanaged VM [{}] due to [{}].", userVm, e.getMessage(), e); cleanupFailedImportVM(userVm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed for unmanaged vm %s during publishing usage records", userVm.getInstanceName())); } @@ -1054,7 +1054,7 @@ private void publishVMUsageUpdateResourceCount(final UserVm userVm, ServiceOffer UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), volume.getDiskOfferingId(), null, volume.getSize(), Volume.class.getName(), volume.getUuid(), volume.isDisplayVolume()); } catch (Exception e) { - logger.error(String.format("Failed to publish volume ID: %s usage records during VM import", volume.getUuid()), e); + logger.error("Failed to publish volume ID: {} usage records during VM import", volume, e); } resourceLimitService.incrementVolumeResourceCount(userVm.getAccountId(), volume.isDisplayVolume(), volume.getSize(), diskOfferingDao.findById(volume.getDiskOfferingId())); @@ -1118,7 +1118,7 @@ private UserVm importVirtualMachineInternal(final UnmanagedInstanceTO unmanagedI try { validatedServiceOffering = getUnmanagedInstanceServiceOffering(unmanagedInstance, serviceOffering, owner, zone, details, cluster.getHypervisorType()); } catch (Exception e) { - String errorMsg = String.format("Failed to import Unmanaged VM [%s] because the service offering [%s] is not compatible due to [%s].", unmanagedInstance.getName(), serviceOffering.getUuid(), StringUtils.defaultIfEmpty(e.getMessage(), "")); + String errorMsg = String.format("Failed to import Unmanaged VM [%s] because the service offering [%s] is not compatible due to [%s].", unmanagedInstance, serviceOffering, StringUtils.defaultIfEmpty(e.getMessage(), "")); logger.error(errorMsg, e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMsg); } @@ -1171,7 +1171,7 @@ private UserVm importVirtualMachineInternal(final UnmanagedInstanceTO unmanagedI } 
checkUnmanagedDiskLimits(owner, rootDisk, serviceOffering, dataDisks, dataDiskOfferingMap); } catch (ResourceAllocationException e) { - logger.error(String.format("Volume resource allocation error for owner: %s", owner.getUuid()), e); + logger.error("Volume resource allocation error for owner: {}", owner, e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Volume resource allocation error for owner: %s. %s", owner.getUuid(), StringUtils.defaultString(e.getMessage()))); } // Check NICs and supplied networks @@ -1459,7 +1459,7 @@ private void checkResourceLimitForImportInstance(Account owner) { try { resourceLimitService.checkResourceLimit(owner, Resource.ResourceType.user_vm, 1); } catch (ResourceAllocationException e) { - logger.error(String.format("VM resource allocation error for account: %s", owner.getUuid()), e); + logger.error("VM resource allocation error for account: {}", owner, e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM resource allocation error for account: %s. 
%s", owner.getUuid(), StringUtils.defaultString(e.getMessage()))); } } @@ -1640,8 +1640,8 @@ protected UserVm importUnmanagedInstanceFromVmwareToKvm(DataCenter zone, Cluster HostVO convertHost = selectKVMHostForConversionInCluster(destinationCluster, convertInstanceHostId); HostVO importHost = selectKVMHostForImportingInCluster(destinationCluster, importInstanceHostId); CheckConvertInstanceAnswer conversionSupportAnswer = checkConversionSupportOnHost(convertHost, sourceVMName, false); - logger.debug(String.format("The host %s (%s) is selected to execute the conversion of the instance %s" + - " from VMware to KVM ", convertHost.getId(), convertHost.getName(), sourceVMName)); + logger.debug("The host {} is selected to execute the conversion of the " + + "instance {} from VMware to KVM ", convertHost, sourceVMName); temporaryConvertLocation = selectInstanceConversionTemporaryLocation( destinationCluster, convertHost, convertStoragePoolId); @@ -1735,8 +1735,8 @@ private void checkNetworkingBeforeConvertingVmwareInstance(DataCenter zone, Acco private void checkUnmanagedNicAndNetworkMacAddressForImport(NetworkVO network, UnmanagedInstanceTO.Nic nic, boolean forced) { NicVO existingNic = nicDao.findByNetworkIdAndMacAddress(network.getId(), nic.getMacAddress()); if (existingNic != null && !forced) { - String err = String.format("NIC with MAC address %s already exists on network with ID %s and forced flag is disabled. " + - "Retry with forced flag enabled if a new MAC address to be generated.", nic.getMacAddress(), network.getUuid()); + String err = String.format("NIC %s with MAC address %s already exists on network %s and forced flag is disabled. 
" + + "Retry with forced flag enabled if a new MAC address to be generated.", nic, nic.getMacAddress(), network); logger.error(err); throw new CloudRuntimeException(err); } @@ -1835,19 +1835,19 @@ HostVO selectKVMHostForImportingInCluster(Cluster destinationCluster, Long impor } else if (selectedHost.getResourceState() != ResourceState.Enabled) { err = String.format( "Cannot import the converted instance on the host %s as it is not in Enabled state", - selectedHost.getName()); + selectedHost); } else if (selectedHost.getStatus() != Status.Up) { err = String.format( "Cannot import the converted instance on the host %s as it is not running", - selectedHost.getName()); + selectedHost); } else if (selectedHost.getType() != Host.Type.Routing) { err = String.format( "Cannot import the converted instance on the host %s as it is not a routing host", - selectedHost.getName()); + selectedHost); } else if (destinationCluster.getId() != selectedHost.getClusterId()) { err = String.format( "Cannot import the converted instance on the host %s as it is not in the same cluster as the destination cluster", - selectedHost.getName()); + selectedHost); } if (err != null) { @@ -1864,7 +1864,7 @@ HostVO selectKVMHostForImportingInCluster(Cluster destinationCluster, Long impor String err = String.format( "Could not find any suitable %s host in cluster %s to import the converted instance", - destinationCluster.getHypervisorType(), destinationCluster.getName()); + destinationCluster.getHypervisorType(), destinationCluster); logger.error(err); throw new CloudRuntimeException(err); } @@ -1879,19 +1879,19 @@ HostVO selectKVMHostForConversionInCluster(Cluster destinationCluster, Long conv } else if (!List.of(ResourceState.Enabled, ResourceState.Disabled).contains(selectedHost.getResourceState())) { err = String.format( "Cannot perform the conversion on the host %s as the host is in %s state", - selectedHost.getName(), selectedHost.getResourceState()); + selectedHost, 
selectedHost.getResourceState()); } else if (selectedHost.getStatus() != Status.Up) { err = String.format( "Cannot perform the conversion on the host %s as it is not running", - selectedHost.getName()); + selectedHost); } else if (selectedHost.getType() != Host.Type.Routing) { err = String.format( "Cannot perform the conversion on the host %s as it is not a routing host", - selectedHost.getName()); + selectedHost); } else if (destinationCluster.getDataCenterId() != selectedHost.getDataCenterId()) { err = String.format( "Cannot perform the conversion on the host %s as it is not in the same zone as the destination cluster", - selectedHost.getName()); + selectedHost); } if (err != null) { logger.error(err); @@ -1913,13 +1913,13 @@ HostVO selectKVMHostForConversionInCluster(Cluster destinationCluster, Long conv } String err = String.format("Could not find any suitable %s host in cluster %s to perform the instance conversion", - destinationCluster.getHypervisorType(), destinationCluster.getName()); + destinationCluster.getHypervisorType(), destinationCluster); logger.error(err); throw new CloudRuntimeException(err); } private CheckConvertInstanceAnswer checkConversionSupportOnHost(HostVO convertHost, String sourceVM, boolean checkWindowsGuestConversionSupport) { - logger.debug(String.format("Checking the %s conversion support on the host %s (%s)", checkWindowsGuestConversionSupport? "windows guest" : "", convertHost.getId(), convertHost.getName())); + logger.debug(String.format("Checking the %s conversion support on the host %s", checkWindowsGuestConversionSupport? 
"windows guest" : "", convertHost)); CheckConvertInstanceCommand cmd = new CheckConvertInstanceCommand(checkWindowsGuestConversionSupport); int timeoutSeconds = 60; cmd.setWait(timeoutSeconds); @@ -1929,14 +1929,14 @@ private CheckConvertInstanceAnswer checkConversionSupportOnHost(HostVO convertHo checkConvertInstanceAnswer = (CheckConvertInstanceAnswer) agentManager.send(convertHost.getId(), cmd); } catch (AgentUnavailableException | OperationTimedoutException e) { String err = String.format("Failed to check %s conversion support on the host %s for converting instance %s from VMware to KVM due to: %s", - checkWindowsGuestConversionSupport? "windows guest" : "", convertHost.getName(), sourceVM, e.getMessage()); + checkWindowsGuestConversionSupport? "windows guest" : "", convertHost, sourceVM, e.getMessage()); logger.error(err); throw new CloudRuntimeException(err); } if (!checkConvertInstanceAnswer.getResult()) { String err = String.format("The host %s doesn't support conversion of instance %s from VMware to KVM due to: %s", - convertHost.getName(), sourceVM, checkConvertInstanceAnswer.getDetails()); + convertHost, sourceVM, checkConvertInstanceAnswer.getDetails()); logger.error(err); throw new CloudRuntimeException(err); } @@ -1950,8 +1950,8 @@ private UnmanagedInstanceTO convertVmwareInstanceToKVMWithOVFOnConvertLocation( List convertStoragePools, DataStoreTO temporaryConvertLocation, String ovfTemplateDirConvertLocation ) { - logger.debug(String.format("Delegating the conversion of instance %s from VMware to KVM to the host %s (%s) using OVF %s on conversion datastore", - sourceVM, convertHost.getId(), convertHost.getName(), ovfTemplateDirConvertLocation)); + logger.debug("Delegating the conversion of instance {} from VMware to KVM to the host {} using OVF {} on conversion datastore", + sourceVM, convertHost, ovfTemplateDirConvertLocation); RemoteInstanceTO remoteInstanceTO = new RemoteInstanceTO(sourceVM); List destinationStoragePools = 
selectInstanceConversionStoragePools(convertStoragePools, sourceVMwareInstance.getDisks()); @@ -1964,15 +1964,15 @@ private UnmanagedInstanceTO convertVmwareInstanceToKVMWithOVFOnConvertLocation( try { convertAnswer = agentManager.send(convertHost.getId(), cmd); } catch (AgentUnavailableException | OperationTimedoutException e) { - String err = String.format("Could not send the convert instance command to host %s (%s) due to: %s", - convertHost.getId(), convertHost.getName(), e.getMessage()); + String err = String.format("Could not send the convert instance command to host %s due to: %s", + convertHost, e.getMessage()); logger.error(err, e); throw new CloudRuntimeException(err); } if (!convertAnswer.getResult()) { String err = String.format("The convert process failed for instance %s from VMware to KVM on host %s: %s", - sourceVM, convertHost.getName(), convertAnswer.getDetails()); + sourceVM, convertHost, convertAnswer.getDetails()); logger.error(err); throw new CloudRuntimeException(err); } @@ -1985,8 +1985,7 @@ private UnmanagedInstanceTO convertVmwareInstanceToKVMAfterExportingOVFToConvert DataStoreTO temporaryConvertLocation, String vcenterHost, String vcenterUsername, String vcenterPassword, String datacenterName ) { - logger.debug(String.format("Delegating the conversion of instance %s from VMware to KVM to the host %s (%s) after OVF export through ovftool", - sourceVM, convertHost.getId(), convertHost.getName())); + logger.debug("Delegating the conversion of instance {} from VMware to KVM to the host {} after OVF export through ovftool", sourceVM, convertHost); RemoteInstanceTO remoteInstanceTO = new RemoteInstanceTO(sourceVMwareInstance.getName(), vcenterHost, vcenterUsername, vcenterPassword, datacenterName); List destinationStoragePools = selectInstanceConversionStoragePools(convertStoragePools, sourceVMwareInstance.getDisks()); @@ -2014,15 +2013,15 @@ private UnmanagedInstanceTO convertAndImportToKVM(ConvertInstanceCommand convert try { convertAnswer = 
agentManager.send(convertHost.getId(), convertInstanceCommand); } catch (AgentUnavailableException | OperationTimedoutException e) { - String err = String.format("Could not send the convert instance command to host %s (%s) due to: %s", - convertHost.getId(), convertHost.getName(), e.getMessage()); + String err = String.format("Could not send the convert instance command to host %s due to: %s", + convertHost, e.getMessage()); logger.error(err, e); throw new CloudRuntimeException(err); } if (!convertAnswer.getResult()) { String err = String.format("The convert process failed for instance %s from VMware to KVM on host %s: %s", - sourceVM, convertHost.getName(), convertAnswer.getDetails()); + sourceVM, convertHost, convertAnswer.getDetails()); logger.error(err); throw new CloudRuntimeException(err); } @@ -2035,8 +2034,8 @@ private UnmanagedInstanceTO convertAndImportToKVM(ConvertInstanceCommand convert importAnswer = agentManager.send(importHost.getId(), importCmd); } catch (AgentUnavailableException | OperationTimedoutException e) { String err = String.format( - "Could not send the import converted instance command to host %d (%s) due to: %s", - importHost.getId(), importHost.getName(), e.getMessage()); + "Could not send the import converted instance command to host %s due to: %s", + importHost, e.getMessage()); logger.error(err, e); throw new CloudRuntimeException(err); } @@ -2044,7 +2043,7 @@ private UnmanagedInstanceTO convertAndImportToKVM(ConvertInstanceCommand convert if (!importAnswer.getResult()) { String err = String.format( "The import process failed for instance %s from VMware to KVM on host %s: %s", - sourceVM, importHost.getName(), importAnswer.getDetails()); + sourceVM, importHost, importAnswer.getDetails()); logger.error(err); throw new CloudRuntimeException(err); } @@ -2059,7 +2058,7 @@ private List findInstanceConversionStoragePoolsInCluster(Cluster List zonePools = 
primaryDataStoreDao.findZoneWideStoragePoolsByHypervisorAndPoolType(destinationCluster.getDataCenterId(), Hypervisor.HypervisorType.KVM, Storage.StoragePoolType.NetworkFilesystem); pools.addAll(zonePools); if (pools.isEmpty()) { - String msg = String.format("Cannot find suitable storage pools in cluster %s for the conversion", destinationCluster.getName()); + String msg = String.format("Cannot find suitable storage pools in cluster %s for the conversion", destinationCluster); logger.error(msg); throw new CloudRuntimeException(msg); } @@ -2233,7 +2232,7 @@ public boolean unmanageVMInstance(long vmId) { String instanceName = vmVO.getInstanceName(); if (!existsVMToUnmanage(instanceName, hostId)) { - throw new CloudRuntimeException("VM with id = " + vmVO.getUuid() + " is not found in the hypervisor"); + throw new CloudRuntimeException(String.format("VM %s is not found in the hypervisor", vmVO)); } return userVmManager.unmanageUserVM(vmId); @@ -2247,11 +2246,11 @@ private boolean existsVMToUnmanage(String instanceName, Long hostId) { command.setInstanceName(instanceName); Answer ans = agentManager.easySend(hostId, command); if (!(ans instanceof PrepareUnmanageVMInstanceAnswer)) { - throw new CloudRuntimeException("Error communicating with host " + hostId); + throw new CloudRuntimeException(String.format("Error communicating with host %s", hostDao.findById(hostId))); } PrepareUnmanageVMInstanceAnswer answer = (PrepareUnmanageVMInstanceAnswer) ans; if (!answer.getResult()) { - logger.error("Error verifying VM " + instanceName + " exists on host with ID = " + hostId + ": " + answer.getDetails()); + logger.error("Error verifying VM {} exists on host {}: {}", instanceName::toString, () -> hostDao.findById(hostId), answer::getDetails); } return answer.getResult(); } @@ -2304,7 +2303,7 @@ private UserVmResponse importKvmInstance(ImportVmCmd cmd) { try { resourceLimitService.checkResourceLimit(owner, Resource.ResourceType.user_vm, 1); } catch (ResourceAllocationException e) { - 
logger.error(String.format("VM resource allocation error for account: %s", owner.getUuid()), e); + logger.error("VM resource allocation error for account: {}", owner, e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM resource allocation error for account: %s. %s", owner.getUuid(), StringUtils.defaultString(e.getMessage()))); } String displayName = cmd.getDisplayName(); @@ -2492,7 +2491,7 @@ private UserVm importExternalKvmVirtualMachine(final UnmanagedInstanceTO unmanag try { dest = deploymentPlanningManager.planDeployment(profile, plan, excludeList, null); } catch (Exception e) { - logger.warn(String.format("Import failed for Vm: %s while finding deployment destination", userVm.getInstanceName()), e); + logger.warn("Import failed for Vm: {} while finding deployment destination", userVm, e); cleanupFailedImportVM(userVm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Import failed for Vm: %s while finding deployment destination", userVm.getInstanceName())); } @@ -2623,7 +2622,7 @@ private UserVm importKvmVirtualMachineFromDisk(final ImportSource importSource, try { dest = deploymentPlanningManager.planDeployment(profile, plan, excludeList, null); } catch (Exception e) { - logger.warn(String.format("Import failed for Vm: %s while finding deployment destination", userVm.getInstanceName()), e); + logger.warn("Import failed for Vm: {} while finding deployment destination", userVm, e); cleanupFailedImportVM(userVm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Import failed for Vm: %s while finding deployment destination", userVm.getInstanceName())); } @@ -2717,7 +2716,7 @@ private NetworkVO createDefaultNetworkForAccount(DataCenter zone, Account owner, throw new InvalidParameterValueException("Unable to find physical network with id: " + physicalNetworkId + " and tag: " + requiredOfferings.get(0).getTags()); } - logger.debug("Creating network for account " + owner + " from the network 
offering id=" + requiredOfferings.get(0).getId() + " as a part of deployVM process"); + logger.debug("Creating network for account {} from the network offering {} as a part of deployVM process", owner, requiredOfferings.get(0)); Network newNetwork = networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), owner.getAccountName() + "-network", owner.getAccountName() + "-network", null, null, null, false, null, owner, null, physicalNetwork, zone.getId(), ControlledEntity.ACLType.Account, null, null, null, null, true, null, null, null, null, null, null, null, null, null, null, null); diff --git a/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java b/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java index 4ae871e1ba58..ceffe0193770 100644 --- a/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java +++ b/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java @@ -492,7 +492,7 @@ void runReleasePublicIpRangePostiveTest2() throws Exception { when(configurationMgr._firewallDao.countRulesByIpId(anyLong())).thenReturn(0L); - when(configurationMgr._ipAddrMgr.disassociatePublicIpAddress(anyLong(), anyLong(), any(Account.class))).thenReturn(true); + when(configurationMgr._ipAddrMgr.disassociatePublicIpAddress(any(), anyLong(), any(Account.class))).thenReturn(true); when(configurationMgr._vlanDao.releaseFromLockTable(anyLong())).thenReturn(true); diff --git a/server/src/test/java/com/cloud/ha/HighAvailabilityManagerImplTest.java b/server/src/test/java/com/cloud/ha/HighAvailabilityManagerImplTest.java index 74897967a2f9..d575365e3d8b 100644 --- a/server/src/test/java/com/cloud/ha/HighAvailabilityManagerImplTest.java +++ b/server/src/test/java/com/cloud/ha/HighAvailabilityManagerImplTest.java @@ -197,13 +197,11 @@ public void scheduleRestartForVmsOnHostNonEmptyVMList() { List vms = new ArrayList(); VMInstanceVO vm1 = Mockito.mock(VMInstanceVO.class); 
Mockito.lenient().when(vm1.getHostId()).thenReturn(1l); - //Mockito.when(vm1.getInstanceName()).thenReturn("i-2-3-VM"); Mockito.when(vm1.getType()).thenReturn(VirtualMachine.Type.User); Mockito.when(vm1.isHaEnabled()).thenReturn(true); vms.add(vm1); VMInstanceVO vm2 = Mockito.mock(VMInstanceVO.class); Mockito.when(vm2.getHostId()).thenReturn(1l); - //Mockito.when(vm2.getInstanceName()).thenReturn("r-2-VM"); Mockito.when(vm2.getType()).thenReturn(VirtualMachine.Type.DomainRouter); Mockito.when(vm2.isHaEnabled()).thenReturn(true); vms.add(vm2); diff --git a/server/src/test/java/com/cloud/network/Ipv6ServiceImplTest.java b/server/src/test/java/com/cloud/network/Ipv6ServiceImplTest.java index 02ddd0c983ef..b59eeaa46245 100644 --- a/server/src/test/java/com/cloud/network/Ipv6ServiceImplTest.java +++ b/server/src/test/java/com/cloud/network/Ipv6ServiceImplTest.java @@ -22,6 +22,7 @@ import com.cloud.dc.DataCenterVO; import com.cloud.dc.Vlan; import com.cloud.dc.VlanVO; +import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.DataCenterGuestIpv6PrefixDao; import com.cloud.dc.dao.VlanDao; import com.cloud.event.ActionEventUtils; @@ -36,6 +37,7 @@ import com.cloud.network.dao.Ipv6GuestPrefixSubnetNetworkMapDao; import com.cloud.network.dao.NetworkDetailsDao; import com.cloud.network.dao.NetworkVO; +import com.cloud.network.dao.PhysicalNetworkDao; import com.cloud.network.firewall.FirewallService; import com.cloud.network.guru.PublicNetworkGuru; import com.cloud.network.rules.FirewallManager; @@ -119,6 +121,10 @@ public class Ipv6ServiceImplTest { @Mock IPAddressDao ipAddressDao; @Mock + DataCenterDao zoneDao; + @Mock + PhysicalNetworkDao physicalNetworkDao; + @Mock NetworkOrchestrationService networkOrchestrationService; FirewallManager firewallManager = Mockito.mock(FirewallManager.class); @@ -229,12 +235,13 @@ public void testGetUsedTotalIpv6SubnetForZone() { @Test(expected = ResourceAllocationException.class) @DB public void 
testNoPrefixesPreAllocateIpv6SubnetForNetwork() throws ResourceAllocationException, MalformedObjectNameException, NotCompliantMBeanException, InstanceAlreadyExistsException, MBeanRegistrationException { - final long zoneId = 1L; + DataCenterVO zone = Mockito.mock(DataCenterVO.class); + Mockito.when(zone.getId()).thenReturn(1L); final List prefixes = new ArrayList<>(); - Mockito.when(dataCenterGuestIpv6PrefixDao.listByDataCenterId(zoneId)).thenReturn(prefixes); + Mockito.when(dataCenterGuestIpv6PrefixDao.listByDataCenterId(zone.getId())).thenReturn(prefixes); TransactionLegacy txn = TransactionLegacy.open("testNoPrefixesPreAllocateIpv6SubnetForNetwork"); try { - ipv6Service.preAllocateIpv6SubnetForNetwork(zoneId); + ipv6Service.preAllocateIpv6SubnetForNetwork(zone); } finally { txn.close("testNoPrefixesPreAllocateIpv6SubnetForNetwork"); } @@ -243,17 +250,18 @@ public void testNoPrefixesPreAllocateIpv6SubnetForNetwork() throws ResourceAlloc @Test @DB public void testExistingPreAllocateIpv6SubnetForNetwork() { - final long zoneId = 1L; + DataCenterVO zone = Mockito.mock(DataCenterVO.class); + Mockito.when(zone.getId()).thenReturn(1L); final List prefixes = new ArrayList<>(); DataCenterGuestIpv6PrefixVO prefix = prepareMocksForIpv6Subnet(); prefixes.add(prefix); Ipv6GuestPrefixSubnetNetworkMapVO ipv6GuestPrefixSubnetNetworkMap = new Ipv6GuestPrefixSubnetNetworkMapVO(1L, "fd17:5:8a43:e2a4::/64", null, Ipv6GuestPrefixSubnetNetworkMap.State.Free); - Mockito.when(dataCenterGuestIpv6PrefixDao.listByDataCenterId(zoneId)).thenReturn(prefixes); + Mockito.when(dataCenterGuestIpv6PrefixDao.listByDataCenterId(zone.getId())).thenReturn(prefixes); Mockito.when(ipv6GuestPrefixSubnetNetworkMapDao.findFirstAvailable(prefix.getId())).thenReturn(ipv6GuestPrefixSubnetNetworkMap); updatedPrefixSubnetMap.clear(); try (TransactionLegacy txn = TransactionLegacy.open("testNoPrefixesPreAllocateIpv6SubnetForNetwork")) { try { - ipv6Service.preAllocateIpv6SubnetForNetwork(zoneId); + 
ipv6Service.preAllocateIpv6SubnetForNetwork(zone); } catch (ResourceAllocationException e) { Assert.fail("ResourceAllocationException"); } @@ -269,7 +277,8 @@ public void testExistingPreAllocateIpv6SubnetForNetwork() { @Test @DB public void testNewPreAllocateIpv6SubnetForNetwork() { - final long zoneId = 1L; + DataCenterVO zone = Mockito.mock(DataCenterVO.class); + Mockito.when(zone.getId()).thenReturn(1L); final List prefixes = new ArrayList<>(); DataCenterGuestIpv6PrefixVO prefix = prepareMocksForIpv6Subnet(); final IPv6Network ip6Prefix = IPv6Network.fromString(prefix.getPrefix()); @@ -279,14 +288,14 @@ public void testNewPreAllocateIpv6SubnetForNetwork() { subnets.add(splits.next().toString()); } prefixes.add(prefix); - Mockito.when(dataCenterGuestIpv6PrefixDao.listByDataCenterId(zoneId)).thenReturn(prefixes); + Mockito.when(dataCenterGuestIpv6PrefixDao.listByDataCenterId(zone.getId())).thenReturn(prefixes); Mockito.when(ipv6GuestPrefixSubnetNetworkMapDao.findFirstAvailable(prefix.getId())).thenReturn(null); Mockito.when(ipv6GuestPrefixSubnetNetworkMapDao.listUsedByPrefix(prefix.getId())).thenReturn(new ArrayList<>()); persistedPrefixSubnetMap.clear(); // No subnet is used from the prefix, should allocate any subnet try (TransactionLegacy txn = TransactionLegacy.open("testNewPreAllocateIpv6SubnetForNetwork")) { try { - ipv6Service.preAllocateIpv6SubnetForNetwork(zoneId); + ipv6Service.preAllocateIpv6SubnetForNetwork(zone); } catch (ResourceAllocationException e) { Assert.fail("ResourceAllocationException"); } @@ -306,7 +315,7 @@ public void testNewPreAllocateIpv6SubnetForNetwork() { // All subnets from the prefix are already in use, should return ResourceAllocationException try (TransactionLegacy txn = TransactionLegacy.open("testNewPreAllocateIpv6SubnetForNetwork")) { try { - ipv6Service.preAllocateIpv6SubnetForNetwork(zoneId); + ipv6Service.preAllocateIpv6SubnetForNetwork(zone); Assert.fail("ResourceAllocationException expected but not returned"); } catch 
(ResourceAllocationException ignored) {} } @@ -316,7 +325,7 @@ public void testNewPreAllocateIpv6SubnetForNetwork() { Ipv6GuestPrefixSubnetNetworkMapVO poppedUsedSubnetMap = usedSubnets.remove(2); try (TransactionLegacy txn = TransactionLegacy.open("testNewPreAllocateIpv6SubnetForNetwork")) { try { - ipv6Service.preAllocateIpv6SubnetForNetwork(zoneId); + ipv6Service.preAllocateIpv6SubnetForNetwork(zone); } catch (ResourceAllocationException e) { Assert.fail("ResourceAllocationException"); } @@ -408,6 +417,9 @@ public void testNewErrorAssignPublicIpv6ToNetwork() { Nic nic = Mockito.mock(Nic.class); Mockito.when(nic.getIPv6Address()).thenReturn(null); Mockito.when(nic.getBroadcastUri()).thenReturn(URI.create(vlan)); + DataCenterVO zoneMock = Mockito.mock(DataCenterVO.class); + Mockito.when(zoneDao.findById(Mockito.anyLong())).thenReturn(zoneMock); + Mockito.when(zoneMock.getUuid()).thenReturn("uuid"); try (TransactionLegacy txn = TransactionLegacy.open("testNewErrorAssignPublicIpv6ToNetwork")) { ipv6Service.assignPublicIpv6ToNetwork(Mockito.mock(Network.class), nic); } @@ -611,6 +623,9 @@ public void testCheckNetworkIpv6UpgradeForNoIpv6Vlan() { Mockito.when(vlanVO.getVlanTag()).thenReturn(vlan); Mockito.when(vlanDao.findById(Mockito.anyLong())).thenReturn(vlanVO); Mockito.when(vlanDao.listIpv6RangeByZoneIdAndVlanId(Mockito.anyLong(), Mockito.anyString())).thenReturn(new ArrayList<>()); + DataCenterVO zoneMock = Mockito.mock(DataCenterVO.class); + Mockito.when(zoneDao.findById(zoneId)).thenReturn(zoneMock); + Mockito.when(zoneMock.getUuid()).thenReturn("uuid"); try { ipv6Service.checkNetworkIpv6Upgrade(network); Assert.fail("No InsufficientAddressCapacityException"); diff --git a/server/src/test/java/com/cloud/network/MockFirewallManagerImpl.java b/server/src/test/java/com/cloud/network/MockFirewallManagerImpl.java index cfdb857b5bfb..73437293933e 100644 --- a/server/src/test/java/com/cloud/network/MockFirewallManagerImpl.java +++ 
b/server/src/test/java/com/cloud/network/MockFirewallManagerImpl.java @@ -132,7 +132,7 @@ public void revokeRule(FirewallRuleVO rule, Account caller, long userId, boolean } @Override - public boolean revokeFirewallRulesForIp(long ipId, long userId, Account caller) throws ResourceUnavailableException { + public boolean revokeFirewallRulesForIp(IpAddress ip, long userId, Account caller) throws ResourceUnavailableException { // TODO Auto-generated method stub return false; } @@ -145,7 +145,7 @@ public FirewallRule createRuleForAllCidrs(long ipAddrId, Account caller, Integer } @Override - public boolean revokeAllFirewallRulesForNetwork(long networkId, long userId, Account caller) throws ResourceUnavailableException { + public boolean revokeAllFirewallRulesForNetwork(Network network, long userId, Account caller) throws ResourceUnavailableException { // TODO Auto-generated method stub return false; } diff --git a/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java b/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java index 51d5a61cbca4..a37559e73e3e 100644 --- a/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java +++ b/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java @@ -577,7 +577,7 @@ private void prepareCreateNetworkDnsMocks(CreateNetworkCmd cmd, Network.GuestTyp if(ipv6 && Network.GuestType.Isolated.equals(guestType)) { Mockito.when(networkOfferingDao.isIpv6Supported(networkOfferingId)).thenReturn(true); try { - Mockito.when(ipv6Service.preAllocateIpv6SubnetForNetwork(Mockito.anyLong())).thenReturn(new Pair<>(IP6_GATEWAY, IP6_CIDR)); + Mockito.when(ipv6Service.preAllocateIpv6SubnetForNetwork(Mockito.any())).thenReturn(new Pair<>(IP6_GATEWAY, IP6_CIDR)); } catch (ResourceAllocationException e) { Assert.fail(String.format("failure with exception: %s", e.getMessage())); } diff --git a/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java 
b/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java index b391aeb9f076..7036cef33ec0 100644 --- a/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java +++ b/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java @@ -86,6 +86,7 @@ import com.cloud.user.UserVO; import com.cloud.user.dao.SSHKeyPairDao; import com.cloud.user.dao.UserDao; +import com.cloud.uservm.UserVm; import com.cloud.utils.Pair; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.db.EntityManager; @@ -265,6 +266,7 @@ public class AutoScaleManagerImplTest { final static String INVALID = "invalid"; private static final Long counterId = 1L; + private static final String counterUuid = "1111-1111-1100"; private static final String counterName = "counter name"; private static final Counter.Source counterSource = Counter.Source.CPU; private static final String counterValue = "counter value"; @@ -397,7 +399,7 @@ public class AutoScaleManagerImplTest { public void setUp() { account = new AccountVO("testaccount", 1L, "networkdomain", Account.Type.NORMAL, "uuid"); - account.setId(2L); + account.setId(5L); user = new UserVO(1, "testuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN); CallContext.register(user, account); @@ -1202,7 +1204,7 @@ public void testDeleteAutoScaleVmGroupsByAccount() throws ResourceUnavailableExc when(autoScaleVmGroupVmMapDao.removeByGroup(vmGroupId)).thenReturn(true); when(asGroupStatisticsDao.removeByGroupId(vmGroupId)).thenReturn(true); - boolean result = autoScaleManagerImplSpy.deleteAutoScaleVmGroupsByAccount(accountId); + boolean result = autoScaleManagerImplSpy.deleteAutoScaleVmGroupsByAccount(account); Assert.assertTrue(result); @@ -1218,7 +1220,7 @@ public void testCleanUpAutoScaleResources() { when(asPolicyDao.removeByAccountId(accountId)).thenReturn(2); when(conditionDao.removeByAccountId(accountId)).thenReturn(3); - 
autoScaleManagerImplSpy.cleanUpAutoScaleResources(accountId); + autoScaleManagerImplSpy.cleanUpAutoScaleResources(account); Mockito.verify(autoScaleVmProfileDao).removeByAccountId(accountId); Mockito.verify(asPolicyDao).removeByAccountId(accountId); @@ -1271,9 +1273,9 @@ public void testCreateNewVM1() throws ResourceUnavailableException, Insufficient any(), any(), any(), any(), any(), eq(userData), eq(userDataId), eq(userDataDetails.toString()), any(), any(), any(), eq(true), any(), any(), any(), any(), any(), any(), any(), eq(true), any())).thenReturn(userVmMock); - long result = autoScaleManagerImplSpy.createNewVM(asVmGroupMock); + UserVm result = autoScaleManagerImplSpy.createNewVM(asVmGroupMock); - Assert.assertEquals((long) virtualMachineId, result); + Assert.assertEquals(userVmMock, result); String vmHostNamePattern = autoScaleManagerImplSpy.VM_HOSTNAME_PREFIX + vmGroupName + "-" + asVmGroupMock.getNextVmSeq() + "-[a-z]{6}"; @@ -1321,9 +1323,9 @@ public void testCreateNewVM2() throws ResourceUnavailableException, Insufficient when(networkModel.checkSecurityGroupSupportForNetwork(account, zoneMock, List.of(networkId), Collections.emptyList())).thenReturn(true); - long result = autoScaleManagerImplSpy.createNewVM(asVmGroupMock); + UserVm result = autoScaleManagerImplSpy.createNewVM(asVmGroupMock); - Assert.assertEquals((long) virtualMachineId, result); + Assert.assertEquals(userVmMock, result); String vmHostNamePattern = autoScaleManagerImplSpy.VM_HOSTNAME_PREFIX + vmGroupName + "-" + asVmGroupMock.getNextVmSeq() + "-[a-z]{6}"; @@ -1371,9 +1373,9 @@ public void testCreateNewVM3() throws ResourceUnavailableException, Insufficient when(networkModel.checkSecurityGroupSupportForNetwork(account, zoneMock, List.of(networkId), Collections.emptyList())).thenReturn(false); - long result = autoScaleManagerImplSpy.createNewVM(asVmGroupMock); + UserVm result = autoScaleManagerImplSpy.createNewVM(asVmGroupMock); - Assert.assertEquals((long) virtualMachineId, result); + 
Assert.assertEquals(userVmMock, result); String vmHostNamePattern = autoScaleManagerImplSpy.VM_HOSTNAME_PREFIX + vmGroupNameWithMaxLength.substring(0, 41) + "-" + asVmGroupMock.getNextVmSeq() + "-[a-z]{6}"; @@ -1503,7 +1505,8 @@ public void testDoScaleUp() throws ResourceUnavailableException, InsufficientCap when(autoScaleVmGroupDao.updateState(vmGroupId, AutoScaleVmGroup.State.ENABLED, AutoScaleVmGroup.State.SCALING)).thenReturn(true); when(autoScaleVmGroupDao.updateState(vmGroupId, AutoScaleVmGroup.State.SCALING, AutoScaleVmGroup.State.ENABLED)).thenReturn(true); - Mockito.doReturn(virtualMachineId).when(autoScaleManagerImplSpy).createNewVM(asVmGroupMock); + Mockito.doReturn(userVmMock).when(autoScaleManagerImplSpy).createNewVM(asVmGroupMock); + when(userVmMock.getId()).thenReturn(virtualMachineId); when(asVmGroupMock.getLoadBalancerId()).thenReturn(loadBalancerId); when(lbVmMapDao.listByLoadBalancerId(loadBalancerId)).thenReturn(Arrays.asList(loadBalancerVMMapMock)); @@ -1969,8 +1972,8 @@ public void processPerformanceMonitorAnswer() { autoScaleManagerImplSpy.processPerformanceMonitorAnswer(countersMap, countersNumberMap, groupTO, params, details); - Mockito.verify(autoScaleManagerImplSpy).updateCountersMapWithInstantData(any(), any(), eq(groupTO), eq(scaleUpCounterId), eq(scaleUpConditionId), eq(0L), eq(value1), eq(AutoScaleValueType.INSTANT_VM)); - Mockito.verify(autoScaleManagerImplSpy).updateCountersMapWithInstantData(any(), any(), eq(groupTO), eq(scaleDownCounterId), eq(scaleDownConditionId), eq(0L), eq(value2), eq(AutoScaleValueType.INSTANT_VM)); + Mockito.verify(autoScaleManagerImplSpy).updateCountersMapWithInstantData(any(), any(), eq(groupTO), eq(scaleUpCounterId), any(ConditionTO.class), any(AutoScalePolicyTO.class), eq(value1), eq(AutoScaleValueType.INSTANT_VM)); + Mockito.verify(autoScaleManagerImplSpy).updateCountersMapWithInstantData(any(), any(), eq(groupTO), eq(scaleDownCounterId), any(ConditionTO.class), any(AutoScalePolicyTO.class), eq(value2), 
eq(AutoScaleValueType.INSTANT_VM)); } @Test @@ -2109,6 +2112,11 @@ public void updateCountersMap2() { public void updateCountersMapWithInstantDataForMemory() { AutoScaleVmGroupTO groupTO = Mockito.mock(AutoScaleVmGroupTO.class); AutoScaleVmProfileTO profileTO = Mockito.mock(AutoScaleVmProfileTO.class); + AutoScalePolicyTO scaleUpPolicyTO = Mockito.mock(AutoScalePolicyTO.class); + ConditionTO conditionTO = Mockito.mock(ConditionTO.class); + + when(conditionTO.getId()).thenReturn(conditionId); + when(scaleUpPolicyTO.getId()).thenReturn(scaleUpPolicyId); when(counterDao.findById(counterId)).thenReturn(counterMock); when(counterMock.getSource()).thenReturn(Counter.Source.MEMORY); @@ -2127,7 +2135,7 @@ public void updateCountersMapWithInstantDataForMemory() { double value = 512; autoScaleManagerImplSpy.updateCountersMapWithInstantData(countersMap, countersNumberMap, - groupTO, counterId, conditionId, scaleUpPolicyId, value, AutoScaleValueType.INSTANT_VM); + groupTO, counterId, conditionTO, scaleUpPolicyTO, value, AutoScaleValueType.INSTANT_VM); Assert.assertEquals(1, countersMap.size()); Assert.assertEquals(1, countersNumberMap.size()); @@ -2140,7 +2148,11 @@ public void updateCountersMapWithInstantDataForMemory() { public void updateCountersMapWithInstantDataForCPU() { AutoScaleVmGroupTO groupTO = Mockito.mock(AutoScaleVmGroupTO.class); AutoScaleVmProfileTO profileTO = Mockito.mock(AutoScaleVmProfileTO.class); + AutoScalePolicyTO scaleUpPolicyTO = Mockito.mock(AutoScalePolicyTO.class); + ConditionTO conditionTO = Mockito.mock(ConditionTO.class); + when(conditionTO.getId()).thenReturn(conditionId); + when(scaleUpPolicyTO.getId()).thenReturn(scaleUpPolicyId); when(counterDao.findById(counterId)).thenReturn(counterMock); when(counterMock.getSource()).thenReturn(Counter.Source.CPU); @@ -2153,7 +2165,7 @@ public void updateCountersMapWithInstantDataForCPU() { double value = 0.5; autoScaleManagerImplSpy.updateCountersMapWithInstantData(countersMap, countersNumberMap, - 
groupTO, counterId, conditionId, scaleUpPolicyId, value, AutoScaleValueType.INSTANT_VM); + groupTO, counterId, conditionTO, scaleUpPolicyTO, value, AutoScaleValueType.INSTANT_VM); Assert.assertEquals(1, countersMap.size()); Assert.assertEquals(1, countersNumberMap.size()); @@ -2287,9 +2299,9 @@ public void processVmStatsByIdFromHost() { vmStatsById.put(virtualMachineId, vmStats); Map> policyCountersMap = new HashMap<>(); - CounterTO counter1 = new CounterTO(counterId, counterName, Counter.Source.CPU, counterValue, counterProvider); - CounterTO counter2 = new CounterTO(counterId + 1, counterName, Counter.Source.MEMORY, counterValue, counterProvider); - CounterTO counter3 = new CounterTO(counterId + 2, counterName, Counter.Source.VIRTUALROUTER, counterValue, counterProvider); + CounterTO counter1 = new CounterTO(counterId, counterUuid, counterName, Counter.Source.CPU, counterValue, counterProvider); + CounterTO counter2 = new CounterTO(counterId + 1, counterUuid, counterName, Counter.Source.MEMORY, counterValue, counterProvider); + CounterTO counter3 = new CounterTO(counterId + 2, counterUuid, counterName, Counter.Source.VIRTUALROUTER, counterValue, counterProvider); policyCountersMap.put(scaleUpPolicyId, Arrays.asList(counter1, counter2, counter3)); when(asGroupStatisticsDao.persist(any())).thenReturn(Mockito.mock(AutoScaleVmGroupStatisticsVO.class)); diff --git a/server/src/test/java/com/cloud/network/element/VpcVirtualRouterElementTest.java b/server/src/test/java/com/cloud/network/element/VpcVirtualRouterElementTest.java index 8a5b965514ce..20ddb39d9432 100644 --- a/server/src/test/java/com/cloud/network/element/VpcVirtualRouterElementTest.java +++ b/server/src/test/java/com/cloud/network/element/VpcVirtualRouterElementTest.java @@ -23,6 +23,7 @@ import com.cloud.network.VpnUser; import com.cloud.network.router.VpcVirtualNetworkApplianceManagerImpl; import com.cloud.network.vpc.Vpc; +import com.cloud.network.vpc.dao.VpcDao; import com.cloud.utils.db.EntityManager; 
import com.cloud.vm.DomainRouterVO; import com.cloud.vm.dao.DomainRouterDao; @@ -51,7 +52,10 @@ public class VpcVirtualRouterElementTest { @Mock DataCenterDao _dcDao; - @Mock private DomainRouterDao _routerDao; + @Mock + private DomainRouterDao _routerDao; + @Mock + VpcDao _vpcDao; @Mock EntityManager _entityMgr; diff --git a/server/src/test/java/com/cloud/projects/MockProjectManagerImpl.java b/server/src/test/java/com/cloud/projects/MockProjectManagerImpl.java index 182ad83c6ceb..0abcf9591d44 100644 --- a/server/src/test/java/com/cloud/projects/MockProjectManagerImpl.java +++ b/server/src/test/java/com/cloud/projects/MockProjectManagerImpl.java @@ -179,7 +179,7 @@ public boolean canModifyProjectAccount(Account caller, long accountId) { } @Override - public boolean deleteAccountFromProject(long projectId, long accountId) { + public boolean deleteAccountFromProject(long projectId, Account account) { // TODO Auto-generated method stub return false; } diff --git a/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java b/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java index 6aae7a091d3f..e8b297ff188c 100755 --- a/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java +++ b/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java @@ -620,7 +620,7 @@ public boolean releaseHostReservation(final Long hostId) { } @Override - public boolean isGPUDeviceAvailable(final long hostId, final String groupName, final String vgpuType) { + public boolean isGPUDeviceAvailable(final Host host, final String groupName, final String vgpuType) { // TODO Auto-generated method stub return false; } diff --git a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java index 666324d4ed27..9630b341bc9c 100644 --- a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java +++ 
b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java @@ -1047,9 +1047,6 @@ public void cleanVolumesCacheTest() { volumeApiServiceImpl.cleanVolumesCache(volumeVoMock); - Mockito.verify(dataStoreMock1).getName(); - Mockito.verify(dataStoreMock2).getName(); - Mockito.verify(volumeInfoMock1).delete(); Mockito.verify(volumeInfoMock2).delete(); } diff --git a/server/src/test/java/com/cloud/user/AccountManagerImplTest.java b/server/src/test/java/com/cloud/user/AccountManagerImplTest.java index 645c9e5aa675..384790eeb9ce 100644 --- a/server/src/test/java/com/cloud/user/AccountManagerImplTest.java +++ b/server/src/test/java/com/cloud/user/AccountManagerImplTest.java @@ -171,11 +171,11 @@ public void deleteUserAccount() { Mockito.when(_accountDao.findById(42l)).thenReturn(account); Mockito.doNothing().when(accountManagerImpl).checkAccess(Mockito.any(Account.class), Mockito.isNull(), Mockito.anyBoolean(), Mockito.any(Account.class)); Mockito.when(_accountDao.remove(42l)).thenReturn(true); - Mockito.when(_configMgr.releaseAccountSpecificVirtualRanges(42l)).thenReturn(true); + Mockito.when(_configMgr.releaseAccountSpecificVirtualRanges(account)).thenReturn(true); Mockito.lenient().when(_domainMgr.getDomain(Mockito.anyLong())).thenReturn(domain); Mockito.lenient().when(securityChecker.checkAccess(Mockito.any(Account.class), Mockito.any(Domain.class))).thenReturn(true); Mockito.when(_vmSnapshotDao.listByAccountId(Mockito.anyLong())).thenReturn(new ArrayList()); - Mockito.when(_autoscaleMgr.deleteAutoScaleVmGroupsByAccount(42l)).thenReturn(true); + Mockito.when(_autoscaleMgr.deleteAutoScaleVmGroupsByAccount(account)).thenReturn(true); List sshkeyList = new ArrayList(); SSHKeyPairVO sshkey = new SSHKeyPairVO(); @@ -199,7 +199,7 @@ public void deleteUserAccountCleanup() { Mockito.when(_accountDao.findById(42l)).thenReturn(account); Mockito.doNothing().when(accountManagerImpl).checkAccess(Mockito.any(Account.class), Mockito.isNull(), Mockito.anyBoolean(), 
Mockito.any(Account.class)); Mockito.when(_accountDao.remove(42l)).thenReturn(true); - Mockito.when(_configMgr.releaseAccountSpecificVirtualRanges(42l)).thenReturn(true); + Mockito.when(_configMgr.releaseAccountSpecificVirtualRanges(account)).thenReturn(true); Mockito.when(_userVmDao.listByAccountId(42l)).thenReturn(Arrays.asList(Mockito.mock(UserVmVO.class))); Mockito.when(_vmMgr.expunge(Mockito.any(UserVmVO.class))).thenReturn(false); Mockito.lenient().when(_domainMgr.getDomain(Mockito.anyLong())).thenReturn(domain); @@ -670,7 +670,6 @@ public void validateAndUpdateUsernameIfNeededTestDuplicatedUserSameDomainThisUse long userVoDuplicatedMockId = 67l; UserVO userVoDuplicatedMock = Mockito.mock(UserVO.class); - Mockito.doReturn(userName).when(userVoDuplicatedMock).getUsername(); Mockito.doReturn(userVoDuplicatedMockId).when(userVoDuplicatedMock).getId(); long accountIdUserDuplicated = 98l; diff --git a/server/src/test/java/com/cloud/user/DomainManagerImplTest.java b/server/src/test/java/com/cloud/user/DomainManagerImplTest.java index 39155986941e..2d52f0aa52ef 100644 --- a/server/src/test/java/com/cloud/user/DomainManagerImplTest.java +++ b/server/src/test/java/com/cloud/user/DomainManagerImplTest.java @@ -211,7 +211,7 @@ public void testDeleteDomainRootDomain() { @Test public void testDeleteDomainNoCleanup() { - Mockito.when(_configMgr.releaseDomainSpecificVirtualRanges(Mockito.anyLong())).thenReturn(true); + Mockito.when(_configMgr.releaseDomainSpecificVirtualRanges(Mockito.any())).thenReturn(true); domainManager.deleteDomain(DOMAIN_ID, testDomainCleanup); Mockito.verify(domainManager).deleteDomain(domain, testDomainCleanup); Mockito.verify(domainManager).removeDomainWithNoAccountsForCleanupNetworksOrDedicatedResources(domain); @@ -276,7 +276,7 @@ public void deleteDomain() { Mockito.when(_accountDao.findCleanupsForRemovedAccounts(Mockito.anyLong())).thenReturn(new ArrayList()); Mockito.when(_dedicatedDao.listByDomainId(Mockito.anyLong())).thenReturn(new 
ArrayList()); Mockito.when(domainDaoMock.remove(Mockito.anyLong())).thenReturn(true); - Mockito.when(_configMgr.releaseDomainSpecificVirtualRanges(Mockito.anyLong())).thenReturn(true); + Mockito.when(_configMgr.releaseDomainSpecificVirtualRanges(Mockito.any())).thenReturn(true); try { Assert.assertTrue(domainManager.deleteDomain(20l, false)); @@ -307,7 +307,7 @@ public void deleteDomainCleanup() { Mockito.when(domainDaoMock.remove(Mockito.anyLong())).thenReturn(true); Mockito.when(_resourceCountDao.removeEntriesByOwner(Mockito.anyLong(), Mockito.eq(ResourceOwnerType.Domain))).thenReturn(1l); Mockito.when(_resourceLimitDao.removeEntriesByOwner(Mockito.anyLong(), Mockito.eq(ResourceOwnerType.Domain))).thenReturn(1l); - Mockito.when(_configMgr.releaseDomainSpecificVirtualRanges(Mockito.anyLong())).thenReturn(true); + Mockito.when(_configMgr.releaseDomainSpecificVirtualRanges(Mockito.any())).thenReturn(true); try { Assert.assertTrue(domainManager.deleteDomain(20l, true)); diff --git a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java index d49dcd0f00c9..1cfc0cf9a85f 100644 --- a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java +++ b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java @@ -546,7 +546,7 @@ public void validateOrReplaceMacAddressTestMacAddressNotValidOption4() throws In private void configureValidateOrReplaceMacAddressTest(int times, String macAddress, String expectedMacAddress) throws InsufficientAddressCapacityException { Mockito.when(networkModel.getNextAvailableMacAddressInNetwork(Mockito.anyLong())).thenReturn(expectedMacAddress); - String returnedMacAddress = userVmManagerImpl.validateOrReplaceMacAddress(macAddress, 1l); + String returnedMacAddress = userVmManagerImpl.validateOrReplaceMacAddress(macAddress, _networkMock); Mockito.verify(networkModel, Mockito.times(times)).getNextAvailableMacAddressInNetwork(Mockito.anyLong()); 
assertEquals(expectedMacAddress, returnedMacAddress); diff --git a/server/src/test/java/com/cloud/vm/UserVmManagerTest.java b/server/src/test/java/com/cloud/vm/UserVmManagerTest.java index c3255e064499..1ed0a30af108 100644 --- a/server/src/test/java/com/cloud/vm/UserVmManagerTest.java +++ b/server/src/test/java/com/cloud/vm/UserVmManagerTest.java @@ -772,7 +772,6 @@ public void testUpdateVmNicIpFailure3() throws Exception { @Test public void testApplyUserDataInNetworkWithoutUserDataSupport() throws Exception { UserVm userVm = mock(UserVm.class); - when(userVm.getId()).thenReturn(1L); when(_nicMock.getNetworkId()).thenReturn(2L); when(_networkMock.getNetworkOfferingId()).thenReturn(3L); diff --git a/server/src/test/java/com/cloud/vm/snapshot/VMSnapshotManagerTest.java b/server/src/test/java/com/cloud/vm/snapshot/VMSnapshotManagerTest.java index 440431086eeb..06c917a12440 100644 --- a/server/src/test/java/com/cloud/vm/snapshot/VMSnapshotManagerTest.java +++ b/server/src/test/java/com/cloud/vm/snapshot/VMSnapshotManagerTest.java @@ -353,7 +353,7 @@ public void testUpdateUserVmServiceOfferingDifferentServiceOffering() throws Con verify(_vmSnapshotMgr).changeUserVmServiceOffering(userVm, vmSnapshotVO); verify(_vmSnapshotMgr).getVmMapDetails(userVm); - verify(_vmSnapshotMgr).upgradeUserVmServiceOffering(ArgumentMatchers.eq(TEST_VM_ID), ArgumentMatchers.eq(SERVICE_OFFERING_ID), mapDetailsCaptor.capture()); + verify(_vmSnapshotMgr).upgradeUserVmServiceOffering(ArgumentMatchers.eq(userVm), ArgumentMatchers.eq(SERVICE_OFFERING_ID), mapDetailsCaptor.capture()); } @Test @@ -371,7 +371,7 @@ public void testChangeUserVmServiceOffering() throws ConcurrentOperationExceptio when(_userVmManager.upgradeVirtualMachine(ArgumentMatchers.eq(TEST_VM_ID), ArgumentMatchers.eq(SERVICE_OFFERING_ID), mapDetailsCaptor.capture())).thenReturn(true); _vmSnapshotMgr.changeUserVmServiceOffering(userVm, vmSnapshotVO); verify(_vmSnapshotMgr).getVmMapDetails(userVm); - 
verify(_vmSnapshotMgr).upgradeUserVmServiceOffering(ArgumentMatchers.eq(TEST_VM_ID), ArgumentMatchers.eq(SERVICE_OFFERING_ID), mapDetailsCaptor.capture()); + verify(_vmSnapshotMgr).upgradeUserVmServiceOffering(ArgumentMatchers.eq(userVm), ArgumentMatchers.eq(SERVICE_OFFERING_ID), mapDetailsCaptor.capture()); } @Test(expected=CloudRuntimeException.class) @@ -379,7 +379,7 @@ public void testChangeUserVmServiceOfferingFailOnUpgradeVMServiceOffering() thro when(_userVmManager.upgradeVirtualMachine(ArgumentMatchers.eq(TEST_VM_ID), ArgumentMatchers.eq(SERVICE_OFFERING_ID), mapDetailsCaptor.capture())).thenReturn(false); _vmSnapshotMgr.changeUserVmServiceOffering(userVm, vmSnapshotVO); verify(_vmSnapshotMgr).getVmMapDetails(userVm); - verify(_vmSnapshotMgr).upgradeUserVmServiceOffering(ArgumentMatchers.eq(TEST_VM_ID), ArgumentMatchers.eq(SERVICE_OFFERING_ID), mapDetailsCaptor.capture()); + verify(_vmSnapshotMgr).upgradeUserVmServiceOffering(ArgumentMatchers.eq(userVm), ArgumentMatchers.eq(SERVICE_OFFERING_ID), mapDetailsCaptor.capture()); } @Test @@ -389,7 +389,7 @@ public void testUpgradeUserVmServiceOffering() throws ConcurrentOperationExcepti put(userVmDetailMemory.getName(), userVmDetailMemory.getValue()); }}; when(_userVmManager.upgradeVirtualMachine(TEST_VM_ID, SERVICE_OFFERING_ID, details)).thenReturn(true); - _vmSnapshotMgr.upgradeUserVmServiceOffering(TEST_VM_ID, SERVICE_OFFERING_ID, details); + _vmSnapshotMgr.upgradeUserVmServiceOffering(userVm, SERVICE_OFFERING_ID, details); verify(_userVmManager).upgradeVirtualMachine(TEST_VM_ID, SERVICE_OFFERING_ID, details); } diff --git a/server/src/test/java/com/cloud/vpc/MockConfigurationManagerImpl.java b/server/src/test/java/com/cloud/vpc/MockConfigurationManagerImpl.java index 8f05b716725a..cdd23b0ccc2c 100644 --- a/server/src/test/java/com/cloud/vpc/MockConfigurationManagerImpl.java +++ b/server/src/test/java/com/cloud/vpc/MockConfigurationManagerImpl.java @@ -577,7 +577,7 @@ public void 
createDefaultSystemNetworks(long zoneId) throws ConcurrentOperationE * @see com.cloud.configuration.ConfigurationManager#releaseDomainSpecificVirtualRanges(long) */ @Override - public boolean releaseDomainSpecificVirtualRanges(long domainId) { + public boolean releaseDomainSpecificVirtualRanges(Domain domain) { // TODO Auto-generated method stub return false; } @@ -586,7 +586,7 @@ public boolean releaseDomainSpecificVirtualRanges(long domainId) { * @see com.cloud.configuration.ConfigurationManager#releaseAccountSpecificVirtualRanges(long) */ @Override - public boolean releaseAccountSpecificVirtualRanges(long accountId) { + public boolean releaseAccountSpecificVirtualRanges(Account account) { // TODO Auto-generated method stub return false; } diff --git a/server/src/test/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinitionTest.java b/server/src/test/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinitionTest.java index b5c842b8806a..0b4c48e99b47 100644 --- a/server/src/test/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinitionTest.java +++ b/server/src/test/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinitionTest.java @@ -32,6 +32,7 @@ import com.cloud.network.addr.PublicIp; import com.cloud.network.dao.NetworkVO; import com.cloud.network.dao.NsxProviderDao; +import com.cloud.network.dao.PhysicalNetworkDao; import com.cloud.network.dao.PhysicalNetworkServiceProviderVO; import com.cloud.network.element.VirtualRouterProviderVO; import com.cloud.network.router.VirtualRouter.Role; @@ -75,6 +76,8 @@ public class RouterDeploymentDefinitionTest extends RouterDeploymentDefinitionTe @Mock protected NetworkVO mockNw; @Mock + PhysicalNetworkDao physicalNetworkDao; + @Mock protected NsxProviderDao nsxProviderDao; protected RouterDeploymentDefinition deployment; @@ -101,6 +104,7 @@ public void initTest() { .setAccountOwner(mockOwner) .setParams(params) .build(); + 
deployment.pNtwkDao = physicalNetworkDao; } @Test @@ -607,6 +611,8 @@ public void testFindVirtualProvider() { @Test(expected = CloudRuntimeException.class) public void testFindVirtualProviderWithNullPhyNwSrvProvider() { // Prepare + + when(physicalNetworkDao.findById(PHYSICAL_NW_ID)).thenReturn(null); when(mockNetworkModel.getPhysicalNetworkId(deployment.guestNetwork)).thenReturn(PHYSICAL_NW_ID); final Type type = Type.VirtualRouter; when(physicalProviderDao.findByServiceProvider(PHYSICAL_NW_ID, type.toString())) From 6557a3d343c98439afcc0254dc4cbc425acc42da Mon Sep 17 00:00:00 2001 From: Vishesh Date: Mon, 9 Dec 2024 11:49:00 +0530 Subject: [PATCH 13/22] Fixups in engine --- .../engine/subsystem/api/storage/TemplateInfo.java | 1 + .../subsystem/api/storage/TemplateService.java | 3 ++- .../storage/image/TemplateServiceImpl.java | 12 ++++++------ .../storage/snapshot/SnapshotServiceImpl.java | 2 +- .../vmsnapshot/ScaleIOVMSnapshotStrategy.java | 6 +++--- .../storage/helper/VMSnapshotHelperImpl.java | 11 ++++++----- .../storage/vmsnapshot/VMSnapshotHelper.java | 3 ++- .../cloudstack/storage/volume/VolumeServiceImpl.java | 12 ++++-------- 8 files changed, 25 insertions(+), 25 deletions(-) diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java index a0b62ebce54a..1f7bf45a15ae 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java @@ -23,6 +23,7 @@ public interface TemplateInfo extends DownloadableDataInfo, VirtualMachineTemplate { VirtualMachineTemplate getImage(); + @Override String getUniqueName(); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java 
b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java index df13f951a448..115cf024617f 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.engine.subsystem.api.storage; import com.cloud.agent.api.to.DatadiskTO; +import com.cloud.template.VirtualMachineTemplate; import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.storage.command.CommandResult; @@ -60,7 +61,7 @@ public TemplateInfo getTemplate() { AsyncCallFuture deleteTemplateOnPrimary(TemplateInfo template, StoragePool pool); - void syncTemplateToRegionStore(long templateId, DataStore store); + void syncTemplateToRegionStore(VirtualMachineTemplate templateId, DataStore store); void handleSysTemplateDownload(HypervisorType hostHyper, Long dcId); diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java index 1ea2c34de77f..38e0d0d081cb 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java @@ -395,7 +395,7 @@ public void handleTemplateSync(DataStore store) { logger.error("Unexpected state transition exception for template {}. 
Details: {}", tmplt, e.getMessage()); } } else if (tmplt.getUrl() == null) { - msg = "Private template (" + tmplt + ") with install path " + tmpltInfo.getInstallPath() + " is corrupted, please check in image store: " + tmpltStore.getDataStoreId(); + msg = String.format("Private template (%s) with install path %s is corrupted, please check in image store: %s", tmplt, tmpltInfo.getInstallPath(), store); logger.warn(msg); } else { logger.info("Removing template_store_ref entry for corrupted template {}", tmplt); @@ -1023,20 +1023,20 @@ protected Void syncTemplateCallBack(AsyncCallbackDispatcher future = context.future; SnapshotResult snapResult = new SnapshotResult(snapshot, result.getAnswer()); if (result.isFailed()) { - logger.debug("create snapshot " + context.snapshot.getName() + " failed: " + result.getResult()); + logger.debug("create snapshot {} failed: {}", context.snapshot, result.getResult()); try { snapshot.processEvent(Snapshot.Event.OperationFailed); snapshot.processEvent(Event.OperationFailed); diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java index 11786e4f0ab8..1ec6e20fc9e1 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java @@ -163,7 +163,7 @@ public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { Map srcVolumeDestSnapshotMap = new HashMap<>(); List volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId()); - StoragePoolVO storagePool = vmSnapshotHelper.getStoragePoolForVM(userVm.getId()); + StoragePoolVO storagePool = vmSnapshotHelper.getStoragePoolForVM(userVm); long prev_chain_size = 0; long virtual_size=0; for (VolumeObjectTO volume : volumeTOs) { @@ -291,7 +291,7 @@ public 
boolean revertVMSnapshot(VMSnapshot vmSnapshot) { boolean result = false; try { List volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId()); - StoragePoolVO storagePool = vmSnapshotHelper.getStoragePoolForVM(userVm.getId()); + StoragePoolVO storagePool = vmSnapshotHelper.getStoragePoolForVM(userVm); Long storagePoolId = storagePool.getId(); Map srcSnapshotDestVolumeMap = new HashMap<>(); for (VolumeObjectTO volume : volumeTOs) { @@ -379,7 +379,7 @@ public boolean deleteVMSnapshot(VMSnapshot vmSnapshot) { try { List volumeTOs = vmSnapshotHelper.getVolumeTOList(vmSnapshot.getVmId()); - StoragePoolVO storagePool = vmSnapshotHelper.getStoragePoolForVM(userVm.getId()); + StoragePoolVO storagePool = vmSnapshotHelper.getStoragePoolForVM(userVm); String systemId = storagePoolDetailsDao.findDetail(storagePool.getId(), ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID).getValue(); if (systemId == null) { throw new CloudRuntimeException("Failed to get the system id for PowerFlex storage pool for deleting VM snapshot: " + vmSnapshot.getName()); diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java index 5b712b32a6ad..f2a3d99f93c4 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java @@ -25,6 +25,7 @@ import javax.inject.Inject; +import com.cloud.uservm.UserVm; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -150,22 +151,22 @@ public VMSnapshotTO getSnapshotWithParents(VMSnapshotVO snapshot) { } @Override - public StoragePoolVO getStoragePoolForVM(Long vmId) { - List rootVolumes = 
volumeDao.findReadyRootVolumesByInstance(vmId); + public StoragePoolVO getStoragePoolForVM(UserVm vm) { + List rootVolumes = volumeDao.findReadyRootVolumesByInstance(vm.getId()); if (rootVolumes == null || rootVolumes.isEmpty()) { - throw new InvalidParameterValueException("Failed to find root volume for the user vm:" + vmId); + throw new InvalidParameterValueException(String.format("Failed to find root volume for the user vm: %s", vm)); } VolumeVO rootVolume = rootVolumes.get(0); StoragePoolVO rootVolumePool = primaryDataStoreDao.findById(rootVolume.getPoolId()); if (rootVolumePool == null) { throw new InvalidParameterValueException(String.format( - "Failed to find storage pool for root volume %s for the user vm: %d", rootVolume, vmId)); + "Failed to find storage pool for root volume %s for the user vm: %s", rootVolume, vm)); } if (rootVolumePool.isInMaintenance()) { throw new InvalidParameterValueException(String.format( - "Storage pool %s for root volume %s of the user vm: %d is in maintenance", rootVolumePool, rootVolume, vmId)); + "Storage pool %s for root volume %s of the user vm: %s is in maintenance", rootVolumePool, rootVolume, vm)); } return rootVolumePool; diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java index 6467072b1b3e..6d6cb7b70a93 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java @@ -20,6 +20,7 @@ import java.util.List; +import com.cloud.uservm.UserVm; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.to.VolumeObjectTO; @@ -38,7 +39,7 @@ public interface VMSnapshotHelper { VMSnapshotTO getSnapshotWithParents(VMSnapshotVO snapshot); - StoragePoolVO getStoragePoolForVM(Long vmId); + StoragePoolVO 
getStoragePoolForVM(UserVm vm); Storage.StoragePoolType getStoragePoolType(Long poolId); } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 717b70c34aad..bf67be911086 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -915,7 +915,7 @@ private TemplateInfo createManagedTemplateVolume(TemplateInfo srcTemplateInfo, P VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId(), srcTemplateInfo.getDeployAsIsConfiguration()); if (templatePoolRef == null) { - throw new CloudRuntimeException("Failed to find template " + srcTemplateInfo.getUniqueName() + " in storage pool " + destPrimaryDataStore.getId()); + throw new CloudRuntimeException("Failed to find template " + srcTemplateInfo.getUniqueName() + " in storage pool " + destPrimaryDataStore); } else if (templatePoolRef.getState() == ObjectInDataStoreStateMachine.State.Ready) { // Template already exists return templateOnPrimary; @@ -1098,13 +1098,9 @@ private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, Host host) { Answer answer = agentMgr.easySend(host.getId(), cmd); if (answer == null) { - String msg = "Unable to get an answer to the modify targets command"; - - logger.warn(msg); + logger.warn("Unable to get an answer to the modify targets command"); } else if (!answer.getResult()) { - String msg = String.format("Unable to modify target on the following host: %s", host); - - logger.warn(msg); + logger.warn("Unable to modify target on the following host: {}", host); } } @@ -1152,7 +1148,7 @@ private void createManagedVolumeCopyManagedTemplateAsync(VolumeInfo volumeInfo, VMTemplateStoragePoolVO templatePoolRef = 
_tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), srcTemplateOnPrimary.getId(), null); if (templatePoolRef == null) { - throw new CloudRuntimeException(String.format("Failed to find template %s in storage pool %s", srcTemplateOnPrimary.getUniqueName(), srcTemplateOnPrimary)); + throw new CloudRuntimeException(String.format("Failed to find template %s in storage pool %s", srcTemplateOnPrimary.getImage(), srcTemplateOnPrimary)); } if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) { From 9146221c7ebdd5f8b39122ea52e28462d350cc4d Mon Sep 17 00:00:00 2001 From: Vishesh Date: Mon, 16 Dec 2024 13:26:29 +0530 Subject: [PATCH 14/22] Improve logging to include more identifiable information for plugins --- .../src/main/java/com/cloud/agent/Agent.java | 47 +++++++++-- .../com/cloud/agent/api/ReadyCommand.java | 20 ++++- .../storage/to/PrimaryDataStoreTO.java | 10 +-- .../cloud/agent/manager/AgentManagerImpl.java | 4 +- .../cloudstack/affinity/AffinityGroupVO.java | 6 +- .../affinity/ExplicitDedicationProcessor.java | 6 +- .../affinity/HostAffinityProcessor.java | 2 +- .../affinity/HostAntiAffinityProcessor.java | 10 +-- .../NonStrictHostAffinityProcessor.java | 10 +-- .../cloudstack/sioc/SiocManagerImpl.java | 11 ++- .../backup/DummyBackupProvider.java | 10 +-- .../backup/NetworkerBackupProvider.java | 35 ++++---- .../backup/networker/NetworkerClient.java | 6 +- .../backup/VeeamBackupProvider.java | 21 +++-- .../backup/VeeamBackupProviderTest.java | 2 +- .../DedicatedResourceManagerImpl.java | 50 +++++------ .../deploy/ImplicitDedicationPlanner.java | 13 +-- .../allocator/impl/RandomAllocator.java | 8 +- .../manager/BareMetalDiscoverer.java | 2 +- .../baremetal/manager/BareMetalPlanner.java | 6 +- .../manager/BaremetalManagerImpl.java | 10 +-- .../java/com/cloud/ha/HypervInvestigator.java | 2 +- .../discoverer/HypervServerDiscoverer.java | 12 +-- .../hypervisor/hyperv/guru/HypervGuru.java | 2 +- .../kvm/storage/KVMStorageProcessor.java | 68 
++++++++------- .../com/cloud/ha/SimulatorInvestigator.java | 2 +- .../hypervisor/guru/VmwareVmImplementer.java | 2 +- .../vmware/VmwareServerDiscoverer.java | 2 +- .../resource/VmwareStorageProcessor.java | 12 +-- .../XenServerStorageMotionStrategy.java | 22 +++-- .../cloudian/CloudianConnectorImpl.java | 6 +- .../cluster/KubernetesClusterManagerImpl.java | 82 +++++++++---------- .../cluster/KubernetesClusterVO.java | 8 ++ .../KubernetesClusterDestroyWorker.java | 34 ++++---- .../KubernetesClusterScaleWorker.java | 35 ++++---- .../KubernetesClusterUpgradeWorker.java | 12 ++- .../cluster/utils/KubernetesClusterUtil.java | 50 +++++------ .../version/KubernetesSupportedVersionVO.java | 8 ++ .../version/KubernetesVersionManagerImpl.java | 6 +- .../metrics/MetricsServiceImpl.java | 2 +- .../cloud/network/BigSwitchBcfDeviceVO.java | 8 ++ .../network/element/BigSwitchBcfElement.java | 18 ++-- .../guru/BigSwitchBcfGuestNetworkGuru.java | 10 +-- .../network/element/BrocadeVcsElement.java | 14 ++-- .../guru/BrocadeVcsGuestNetworkGuru.java | 10 +-- .../network/cisco/CiscoVnmcControllerVO.java | 9 ++ .../network/element/CiscoVnmcElement.java | 14 ++-- .../element/ElasticLoadBalancerElement.java | 2 +- .../lb/ElasticLoadBalancerManagerImpl.java | 6 +- .../network/lb/LoadBalanceRuleHandler.java | 10 ++- .../element/InternalLoadBalancerElement.java | 3 +- .../lb/InternalLoadBalancerVMManagerImpl.java | 14 ++-- .../management/ContrailElementImpl.java | 18 ++-- .../contrail/management/ContrailGuru.java | 18 ++-- .../management/ManagementNetworkGuru.java | 2 +- .../contrail/model/VirtualMachineModel.java | 4 +- .../contrail/model/VirtualNetworkModel.java | 5 +- .../network/element/NetscalerElement.java | 6 +- .../network/vm/NetScalerVMManagerImpl.java | 4 +- .../network/element/NiciraNvpElement.java | 32 ++++---- .../guru/NiciraNvpGuestNetworkGuru.java | 8 +- .../OpendaylightGuestNetworkGuru.java | 8 +- ...DaylightControllerResourceManagerImpl.java | 8 +- 
.../com/cloud/network/element/OvsElement.java | 58 +++++-------- .../network/guru/OvsGuestNetworkGuru.java | 5 +- .../network/ovs/OvsTunnelManagerImpl.java | 63 ++++++-------- .../ConfigTungstenFabricServiceCmd.java | 2 +- .../tungsten/service/TungstenElement.java | 11 +-- .../service/TungstenGuestNetworkGuru.java | 4 +- .../network/guru/VxlanGuestNetworkGuru.java | 2 +- .../ElastistorPrimaryDataStoreDriver.java | 2 +- .../ElastistorPrimaryDataStoreLifeCycle.java | 12 +-- .../provider/ElastistorHostListener.java | 10 +-- .../driver/DateraPrimaryDataStoreDriver.java | 31 ++++--- .../provider/DateraHostListener.java | 4 +- ...oudStackPrimaryDataStoreLifeCycleImpl.java | 8 +- .../LinstorPrimaryDataStoreDriverImpl.java | 4 +- .../LinstorPrimaryDataStoreLifeCycleImpl.java | 12 +-- .../SolidFirePrimaryDataStoreDriver.java | 18 ++-- ...idFireSharedPrimaryDataStoreLifeCycle.java | 28 +++---- .../provider/SolidFireHostListener.java | 38 ++++----- .../provider/SolidFireSharedHostListener.java | 22 ++--- .../storage/datastore/util/SolidFireUtil.java | 4 +- .../StorPoolBackupSnapshotCommandWrapper.java | 3 +- .../provider/StorPoolHostListener.java | 20 ++--- .../motion/StorPoolDataMotionStrategy.java | 18 ++-- .../snapshot/StorPoolSnapshotStrategy.java | 12 +-- .../snapshot/StorPoolVMSnapshotStrategy.java | 15 ++-- .../diagnostics/to/DiagnosticsDataObject.java | 8 ++ 89 files changed, 667 insertions(+), 634 deletions(-) diff --git a/agent/src/main/java/com/cloud/agent/Agent.java b/agent/src/main/java/com/cloud/agent/Agent.java index 511d0bb0bdbb..d760897fbeae 100644 --- a/agent/src/main/java/com/cloud/agent/Agent.java +++ b/agent/src/main/java/com/cloud/agent/Agent.java @@ -132,6 +132,8 @@ public int value() { ServerResource _resource; Link _link; Long _id; + String uuid; + String name; Timer _timer = new Timer("Agent Timer"); Timer certTimer; @@ -182,8 +184,10 @@ public Agent(final IAgentShell shell, final int localAgentId, final ServerResour resource.setAgentControl(this); 
final String value = _shell.getPersistentProperty(getResourceName(), "id"); + uuid = _shell.getPersistentProperty(getResourceName(), "uuid"); + name = _shell.getPersistentProperty(getResourceName(), "name"); _id = value != null ? Long.parseLong(value) : null; - logger.info("id is {}", ObjectUtils.defaultIfNull(_id, "")); + logger.info("Initialising agent [id: {}, uuid: {}, name: {}]", ObjectUtils.defaultIfNull(_id, ""), uuid, name); final Map params = new HashMap<>(); @@ -212,8 +216,9 @@ public Agent(final IAgentShell shell, final int localAgentId, final ServerResour new ThreadPoolExecutor(_shell.getWorkers(), 5 * _shell.getWorkers(), 1, TimeUnit.DAYS, new LinkedBlockingQueue(), new NamedThreadFactory( "agentRequest-Handler")); - logger.info("Agent [id = {} : type = {} : zone = {} : pod = {} : workers = {} : host = {} : port = {}", ObjectUtils.defaultIfNull(_id, "new"), getResourceName(), - _shell.getZone(), _shell.getPod(), _shell.getWorkers(), host, _shell.getPort()); + logger.info("Agent [id = {}, uuid: {}, name: {}] : type = {} : zone = {} : pod = {} : workers = {} : host = {} : port = {}", + ObjectUtils.defaultIfNull(_id, "new"), uuid, name, getResourceName(), + _shell.getZone(), _shell.getPod(), _shell.getWorkers(), host, _shell.getPort()); } public String getVersion() { @@ -382,6 +387,26 @@ public void setId(final Long id) { _shell.setPersistentProperty(getResourceName(), "id", Long.toString(id)); } + public String getUuid() { + return uuid; + } + + public void setUuid(String uuid) { + logger.debug("Set agent uuid {}", uuid); + this.uuid = uuid; + _shell.setPersistentProperty(getResourceName(), "uuid", uuid); + } + + public String getName() { + return name; + } + + public void setName(String name) { + logger.debug("Set agent name {}", name); + this.name = name; + _shell.setPersistentProperty(getResourceName(), "name", name); + } + private synchronized void scheduleServicesRestartTask() { if (certTimer != null) { certTimer.cancel(); @@ -594,10 +619,12 @@ 
public void processStartupAnswer(final Answer answer, final Response response, f return; } - logger.info("Process agent startup answer, agent [id: {}, name: {}] connected to the server", - startup.getHostId(), startup.getHostName()); + logger.info("Process agent startup answer, agent [id: {}, uuid: {}, name: {}] connected to the server", + startup.getHostId(), startup.getHostUuid(), startup.getHostName()); setId(startup.getHostId()); + setUuid(startup.getHostUuid()); + setName(startup.getHostName()); _pingInterval = (long)startup.getPingInterval() * 1000; // change to ms. setLastPingResponseTime(); @@ -605,8 +632,8 @@ public void processStartupAnswer(final Answer answer, final Response response, f _ugentTaskPool.setKeepAliveTime(2 * _pingInterval, TimeUnit.MILLISECONDS); - logger.info("Startup Response Received: agent [id: {}, name: {}]", - getId(), startup.getHostName()); + logger.info("Startup Response Received: agent [id: {}, uuid: {}, name: {}]", + startup.getHostId(), startup.getHostUuid(), startup.getHostName()); } protected void processRequest(final Request request, final Link link) { @@ -862,15 +889,17 @@ public void processReadyCommand(final Command cmd) { NumbersUtil.enableHumanReadableSizes = humanReadable; } - logger.info("Processing agent ready command, agent id = {}", ready.getHostId()); + logger.info("Processing agent ready command, agent id = {}, uuid = {}, name = {}", ready.getHostId(), ready.getHostUuid(), ready.getHostName()); if (ready.getHostId() != null) { setId(ready.getHostId()); + setUuid(ready.getHostUuid()); + setName(ready.getHostName()); } verifyAgentArch(ready.getArch()); processManagementServerList(ready.getMsHostList(), ready.getLbAlgorithm(), ready.getLbCheckInterval()); - logger.info("Ready command is processed for agent id = {}", getId()); + logger.info("Ready command is processed for agent [id: {}, uuid: {}, name: {}]", getId(), getUuid(), getName()); } private void verifyAgentArch(String arch) { diff --git 
a/core/src/main/java/com/cloud/agent/api/ReadyCommand.java b/core/src/main/java/com/cloud/agent/api/ReadyCommand.java index 42f1d264a50d..e2d974e38786 100644 --- a/core/src/main/java/com/cloud/agent/api/ReadyCommand.java +++ b/core/src/main/java/com/cloud/agent/api/ReadyCommand.java @@ -19,6 +19,8 @@ package com.cloud.agent.api; +import com.cloud.host.Host; + import java.util.List; public class ReadyCommand extends Command { @@ -30,6 +32,8 @@ public ReadyCommand() { private Long dcId; private Long hostId; + private String hostUuid; + private String hostName; private List msHostList; private String lbAlgorithm; private Long lbCheckInterval; @@ -41,9 +45,11 @@ public ReadyCommand(Long dcId) { this.dcId = dcId; } - public ReadyCommand(final Long dcId, final Long hostId, boolean enableHumanReadableSizes) { - this(dcId); - this.hostId = hostId; + public ReadyCommand(final Host host, boolean enableHumanReadableSizes) { + this(host.getDataCenterId()); + this.hostId = host.getId(); + this.hostUuid = host.getUuid(); + this.hostName = host.getName(); this.enableHumanReadableSizes = enableHumanReadableSizes; } @@ -68,6 +74,14 @@ public Long getHostId() { return hostId; } + public String getHostUuid() { + return hostUuid; + } + + public String getHostName() { + return hostName; + } + public List getMsHostList() { return msHostList; } diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java index a6a74176c136..e12c61b3dcf7 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java @@ -145,15 +145,7 @@ public String getPathSeparator() { @Override public String toString() { - return new StringBuilder("PrimaryDataStoreTO[uuid=").append(uuid) - .append("|name=") - .append(name) - .append("|id=") - .append(id) - .append("|pooltype=") - .append(poolType) - 
.append("]") - .toString(); + return String.format("PrimaryDataStoreTO[uuid=%s|name=%s|id=%d|pooltype=%s]", uuid, name, id, poolType); } public Boolean isFullCloneFlag() { diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java index 0c113c90e6ac..9333410e0aa2 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java @@ -581,7 +581,7 @@ protected AgentAttache notifyMonitorsOfConnection(final AgentAttache attache, fi } final Long dcId = host.getDataCenterId(); - final ReadyCommand ready = new ReadyCommand(dcId, host.getId(), NumbersUtil.enableHumanReadableSizes); + final ReadyCommand ready = new ReadyCommand(host, NumbersUtil.enableHumanReadableSizes); ready.setWait(ReadyCommandWait.value()); final Answer answer = easySend(hostId, ready); if (answer == null || !answer.getResult()) { @@ -1146,7 +1146,7 @@ private AgentAttache handleConnectedAgent(final Link link, final StartupCommand[ final HostVO host = _resourceMgr.createHostVOForConnectedAgent(startup); if (host != null) { checkHostArchOnCluster(host); - ready = new ReadyCommand(host.getDataCenterId(), host.getId(), NumbersUtil.enableHumanReadableSizes); + ready = new ReadyCommand(host, NumbersUtil.enableHumanReadableSizes); attache = sendReadyAndGetAttache(host, ready, link, startup); } } catch (final Exception e) { diff --git a/engine/schema/src/main/java/org/apache/cloudstack/affinity/AffinityGroupVO.java b/engine/schema/src/main/java/org/apache/cloudstack/affinity/AffinityGroupVO.java index 536b96c6567e..9b8fc5981719 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/affinity/AffinityGroupVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/affinity/AffinityGroupVO.java @@ -28,6 +28,7 @@ import javax.persistence.Table; import 
org.apache.cloudstack.acl.ControlledEntity; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "affinity_group") @@ -119,9 +120,8 @@ public ControlledEntity.ACLType getAclType() { @Override public String toString() { - StringBuilder buf = new StringBuilder("AffinityGroup["); - buf.append(uuid).append("]"); - return buf.toString(); + return String.format("AffinityGroup %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); } @Override diff --git a/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java b/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java index ec6674477b07..705b35b40436 100644 --- a/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java +++ b/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java @@ -95,7 +95,7 @@ public void process(VirtualMachineProfile vmProfile, DeploymentPlan plan, Exclud for (AffinityGroupVMMapVO vmGroupMapping : vmGroupMappings) { if (vmGroupMapping != null) { if (logger.isDebugEnabled()) { - logger.debug("Processing affinity group " + vmGroupMapping.getAffinityGroupId() + "of type 'ExplicitDedication' for VM Id: " + vm.getId()); + logger.debug("Processing affinity group " + vmGroupMapping.getAffinityGroupId() + "of type 'ExplicitDedication' for VM: " + vm); } long affinityGroupId = vmGroupMapping.getAffinityGroupId(); @@ -304,7 +304,7 @@ private ExcludeList updateAvoidList(List dedicatedResources DedicatedResourceVO dPod = _dedicatedDao.findByPodId(pod.getId()); if (dPod != null && !dedicatedResources.contains(dPod)) { if (logger.isDebugEnabled()) { - logger.debug(String.format("Avoiding POD %s [%s] because it 
is not dedicated.", pod.getName(), pod.getUuid())); + logger.debug(String.format("Avoiding POD %s because it is not dedicated.", pod)); } avoidList.addPod(pod.getId()); } else { @@ -345,7 +345,7 @@ private ExcludeList updateAvoidList(List dedicatedResources for (HostPodVO pod : pods) { if (podsInIncludeList != null && !podsInIncludeList.contains(pod.getId())) { if (logger.isDebugEnabled()) { - logger.debug(String.format("Avoiding POD %s [%s], as it is not in include list.", pod.getName(), pod.getUuid())); + logger.debug(String.format("Avoiding POD %s, as it is not in include list.", pod)); } avoidList.addPod(pod.getId()); } diff --git a/plugins/affinity-group-processors/host-affinity/src/main/java/org/apache/cloudstack/affinity/HostAffinityProcessor.java b/plugins/affinity-group-processors/host-affinity/src/main/java/org/apache/cloudstack/affinity/HostAffinityProcessor.java index b94cf49e4d94..b97b8e224ad7 100644 --- a/plugins/affinity-group-processors/host-affinity/src/main/java/org/apache/cloudstack/affinity/HostAffinityProcessor.java +++ b/plugins/affinity-group-processors/host-affinity/src/main/java/org/apache/cloudstack/affinity/HostAffinityProcessor.java @@ -80,7 +80,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { */ protected void processAffinityGroup(AffinityGroupVMMapVO vmGroupMapping, DeploymentPlan plan, VirtualMachine vm, List vmList) { AffinityGroupVO group = _affinityGroupDao.findById(vmGroupMapping.getAffinityGroupId()); - logger.debug("Processing affinity group " + group.getName() + " for VM Id: " + vm.getId()); + logger.debug("Processing affinity group {} for VM {}", group, vm); List groupVMIds = _affinityGroupVMMapDao.listVmIdsByAffinityGroup(group.getId()); groupVMIds.remove(vm.getId()); diff --git a/plugins/affinity-group-processors/host-anti-affinity/src/main/java/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java 
b/plugins/affinity-group-processors/host-anti-affinity/src/main/java/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java index 4681ce4321ee..bd29a48f2588 100644 --- a/plugins/affinity-group-processors/host-anti-affinity/src/main/java/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java +++ b/plugins/affinity-group-processors/host-anti-affinity/src/main/java/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java @@ -94,7 +94,7 @@ protected void processAffinityGroup(AffinityGroupVMMapVO vmGroupMapping, Exclude AffinityGroupVO group = _affinityGroupDao.findById(vmGroupMapping.getAffinityGroupId()); if (logger.isDebugEnabled()) { - logger.debug("Processing affinity group " + group.getName() + " for VM Id: " + vm.getId()); + logger.debug(String.format("Processing affinity group %s for VM %s", group, vm)); } List groupVMIds = _affinityGroupVMMapDao.listVmIdsByAffinityGroup(group.getId()); @@ -106,7 +106,7 @@ protected void processAffinityGroup(AffinityGroupVMMapVO vmGroupMapping, Exclude if (groupVM.getHostId() != null) { avoid.addHost(groupVM.getHostId()); if (logger.isDebugEnabled()) { - logger.debug("Added host " + groupVM.getHostId() + " to avoid set, since VM " + groupVM.getId() + " is present on the host"); + logger.debug("Added host {} to avoid set, since VM {} is present on the host", groupVM.getHostId(), groupVM); } } } else if (Arrays.asList(VirtualMachine.State.Starting, VirtualMachine.State.Stopped).contains(groupVM.getState()) && groupVM.getLastHostId() != null) { @@ -114,8 +114,7 @@ protected void processAffinityGroup(AffinityGroupVMMapVO vmGroupMapping, Exclude if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) { avoid.addHost(groupVM.getLastHostId()); if (logger.isDebugEnabled()) { - logger.debug("Added host " + groupVM.getLastHostId() + " to avoid set, since VM " + groupVM.getId() + - " is present on the host, in Stopped state but has reserved capacity"); + logger.debug("Added host {} to avoid set, since VM {} is present 
on the host, in Stopped state but has reserved capacity", groupVM.getLastHostId(), groupVM); } } } @@ -155,8 +154,7 @@ public boolean check(VirtualMachineProfile vmProfile, DeployDestination plannedD VMReservationVO vmReservation = _reservationDao.findByVmId(groupVMId); if (vmReservation != null && vmReservation.getHostId() != null && vmReservation.getHostId().equals(plannedHostId)) { if (logger.isDebugEnabled()) { - logger.debug("Planned destination for VM " + vm.getId() + " conflicts with an existing VM " + vmReservation.getVmId() + - " reserved on the same host " + plannedHostId); + logger.debug(String.format("Planned destination for VM %s conflicts with an existing VM %d reserved on the same host %s", vm, vmReservation.getVmId(), plannedDestination.getHost())); } return false; } diff --git a/plugins/affinity-group-processors/non-strict-host-affinity/src/main/java/org/apache/cloudstack/affinity/NonStrictHostAffinityProcessor.java b/plugins/affinity-group-processors/non-strict-host-affinity/src/main/java/org/apache/cloudstack/affinity/NonStrictHostAffinityProcessor.java index f227a3ffc8db..49e3f60ed5d0 100644 --- a/plugins/affinity-group-processors/non-strict-host-affinity/src/main/java/org/apache/cloudstack/affinity/NonStrictHostAffinityProcessor.java +++ b/plugins/affinity-group-processors/non-strict-host-affinity/src/main/java/org/apache/cloudstack/affinity/NonStrictHostAffinityProcessor.java @@ -77,7 +77,7 @@ protected void processAffinityGroup(AffinityGroupVMMapVO vmGroupMapping, Deploym AffinityGroupVO group = affinityGroupDao.findById(vmGroupMapping.getAffinityGroupId()); if (logger.isDebugEnabled()) { - logger.debug("Processing affinity group " + group.getName() + " for VM Id: " + vm.getId()); + logger.debug(String.format("Processing affinity group %s for VM: %s", group, vm)); } List groupVMIds = affinityGroupVMMapDao.listVmIdsByAffinityGroup(group.getId()); @@ -95,17 +95,17 @@ protected void processVmInAffinityGroup(DeploymentPlan plan, VMInstanceVO 
groupV if (groupVM.getHostId() != null) { Integer priority = adjustHostPriority(plan, groupVM.getHostId()); if (logger.isDebugEnabled()) { - logger.debug(String.format("Updated host %s priority to %s , since VM %s is present on the host", - groupVM.getHostId(), priority, groupVM.getId())); + logger.debug(String.format("Updated host %s priority to %s, since VM %s is present on the host", + groupVM.getHostId(), priority, groupVM)); } } else if (Arrays.asList(VirtualMachine.State.Starting, VirtualMachine.State.Stopped).contains(groupVM.getState()) && groupVM.getLastHostId() != null) { long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - groupVM.getUpdateTime().getTime()) / 1000; if (secondsSinceLastUpdate < vmCapacityReleaseInterval) { Integer priority = adjustHostPriority(plan, groupVM.getLastHostId()); if (logger.isDebugEnabled()) { - logger.debug(String.format("Updated host %s priority to %s , since VM %s" + + logger.debug(String.format("Updated host %s priority to %s, since VM %s" + " is present on the host, in %s state but has reserved capacity", - groupVM.getLastHostId(), priority, groupVM.getId(), groupVM.getState())); + groupVM.getLastHostId(), priority, groupVM, groupVM.getState())); } } } diff --git a/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/sioc/SiocManagerImpl.java b/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/sioc/SiocManagerImpl.java index c87ff3dfc827..e93b8df39e95 100644 --- a/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/sioc/SiocManagerImpl.java +++ b/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/sioc/SiocManagerImpl.java @@ -98,11 +98,11 @@ public void updateSiocInfo(long zoneId, long storagePoolId, int sharesPerGB, int } if (storagePool.getDataCenterId() != zoneId) { - throw new Exception("Error: Storage pool '" + storagePool.getName() + "' is not in zone ID " + zoneId + "."); + throw new Exception(String.format("Error: Storage pool %s is not in zone %s.", storagePool, 
zone)); } if (!storagePool.getPoolType().equals(StoragePoolType.VMFS)) { - throw new Exception("Error: Storage pool '" + storagePool.getName() + "' does not represent a VMFS datastore."); + throw new Exception(String.format("Error: Storage pool %s does not represent a VMFS datastore.", storagePool)); } String lockName = zone.getUuid() + "-" + storagePool.getUuid(); @@ -193,7 +193,7 @@ private ResultWrapper updateSiocInfo(VMwareUtil.VMwareConnection connection, Map ManagedObjectReference morVm = nameToVm.get(vmName); if (morVm == null) { - String errMsg = "Error: The VM with ID " + instanceId + " could not be located (ManagedObjectReference)."; + String errMsg = String.format("Error: The VM %s could not be located (ManagedObjectReference).", vmInstance); throw new Exception(errMsg); } @@ -336,7 +336,7 @@ private ResultWrapper updateSiocInfoForWorkerVM(VMwareUtil.VMwareConnection conn } private String getInfoMsg(Volume volume, Integer newShares, Long newLimitIops) { - String msgPrefix = "VMware SIOC: Volume = " + volume.getName(); + String msgPrefix = String.format("VMware SIOC: Volume %s", volume); String msgNewShares = newShares != null ? "; New Shares = " + newShares : ""; @@ -354,8 +354,7 @@ private VolumeVO getVolumeFromVirtualDisk(VMInstanceVO vmInstance, long storageP List volumes = volumeDao.findByInstance(vmInstance.getId()); if (volumes == null || volumes.size() == 0) { - String errMsg = "Error: The VMware virtual disk '" + disk + "' could not be mapped to a CloudStack volume. " + - "There were no volumes for the VM with the following ID: " + vmInstance.getId() + "."; + String errMsg = String.format("Error: The VMware virtual disk '%s' could not be mapped to a CloudStack volume. 
There were no volumes for the VM: %s.", disk, vmInstance); throw new Exception(errMsg); } diff --git a/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java b/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java index f162c51a703d..d4b3cff0f5c1 100644 --- a/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java +++ b/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java @@ -67,20 +67,20 @@ public boolean isValidProviderOffering(Long zoneId, String uuid) { @Override public boolean assignVMToBackupOffering(VirtualMachine vm, BackupOffering backupOffering) { - logger.debug("Creating VM backup for VM " + vm.getInstanceName() + " from backup offering " + backupOffering.getName()); + logger.debug("Creating VM backup for VM {} from backup offering {}", vm, backupOffering); ((VMInstanceVO) vm).setBackupExternalId("dummy-external-backup-id"); return true; } @Override public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup) { - logger.debug("Restoring vm " + vm.getUuid() + "from backup " + backup.getUuid() + " on the Dummy Backup Provider"); + logger.debug("Restoring vm {} from backup {} on the Dummy Backup Provider", vm, backup); return true; } @Override public Pair restoreBackedUpVolume(Backup backup, String volumeUuid, String hostIp, String dataStoreUuid, Pair vmNameAndState) { - logger.debug("Restoring volume " + volumeUuid + "from backup " + backup.getUuid() + " on the Dummy Backup Provider"); + logger.debug("Restoring volume {} from backup {} on the Dummy Backup Provider", volumeUuid, backup); throw new CloudRuntimeException("Dummy plugin does not support this feature"); } @@ -101,7 +101,7 @@ public Map getBackupMetrics(Long zoneId, List altClusterHosts = hostDao.findHypervisorHostInCluster(host.getClusterId()); for (final HostVO candidateClusterHost : altClusterHosts) { if ( candidateClusterHost.getStatus() == 
Status.Up ) { - LOG.debug("Found Host " + candidateClusterHost.getName()); + LOG.debug(String.format("Found Host %s", candidateClusterHost)); return candidateClusterHost; } } @@ -182,7 +182,7 @@ protected HostVO getLastVMHypervisorHost(VirtualMachine vm) { List altZoneHosts = hostDao.findByDataCenterId(host.getDataCenterId()); for (final HostVO candidateZoneHost : altZoneHosts) { if ( candidateZoneHost.getStatus() == Status.Up && candidateZoneHost.getHypervisorType() == Hypervisor.HypervisorType.KVM ) { - LOG.debug("Found Host " + candidateZoneHost.getName()); + LOG.debug("Found Host " + candidateZoneHost); return candidateZoneHost; } } @@ -331,7 +331,7 @@ public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup) { final NetworkerBackup networkerBackup=getClient(zoneId).getNetworkerBackupInfo(externalBackupId); final String SSID = networkerBackup.getShortId(); - LOG.debug("Restoring vm " + vm.getUuid() + "from backup " + backup.getUuid() + " on the Networker Backup Provider"); + LOG.debug(String.format("Restoring vm %s from backup %s on the Networker Backup Provider", vm, backup)); if ( SSID.isEmpty() ) { LOG.debug("There was an error retrieving the SSID for backup with id " + externalBackupId + " from EMC NEtworker"); @@ -359,7 +359,7 @@ public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup) { script.add("-v"); Date restoreJobStart = new Date(); - LOG.debug("Starting Restore for VM ID " + vm.getUuid() + " and SSID" + SSID + " at " + restoreJobStart); + LOG.debug(String.format("Starting Restore for VM ID %s and %s at %s", vm, SSID, restoreJobStart)); if ( executeRestoreCommand(hostVO, credentials.first(), credentials.second(), script.toString()) ) { Date restoreJobEnd = new Date(); @@ -387,7 +387,7 @@ public Pair restoreBackedUpVolume(Backup backup, String volumeU final String destinationNetworkerClient = hostVO.getName().split("\\.")[0]; Long restoredVolumeDiskSize = 0L; - LOG.debug("Restoring volume " + volumeUuid + "from backup " + 
backup.getUuid() + " on the Networker Backup Provider"); + LOG.debug(String.format("Restoring volume %s with uuid %s from backup %s on the Networker Backup Provider", volume, volumeUuid, backup)); if ( SSID.isEmpty() ) { LOG.debug("There was an error retrieving the SSID for backup with id " + externalBackupId + " from EMC NEtworker"); @@ -448,7 +448,7 @@ public Pair restoreBackedUpVolume(Backup backup, String volumeU script.add("-v"); Date restoreJobStart = new Date(); - LOG.debug("Starting Restore for Volume UUID " + volume.getUuid() + " and SSID" + SSID + " at " + restoreJobStart); + LOG.debug(String.format("Starting Restore for Volume UUID %s and SSID %s at %s", volume, SSID, restoreJobStart)); if ( executeRestoreCommand(hostVO, credentials.first(), credentials.second(), script.toString()) ) { Date restoreJobEnd = new Date(); @@ -505,18 +505,18 @@ public boolean takeBackup(VirtualMachine vm) { if ( Boolean.TRUE.equals(NetworkerClientVerboseLogs.value()) ) script.add("-v"); - LOG.debug("Starting backup for VM ID " + vm.getUuid() + " on Networker provider"); + LOG.debug("Starting backup for VM {} on Networker provider", vm); Date backupJobStart = new Date(); String saveTime = executeBackupCommand(hostVO, credentials.first(), credentials.second(), script.toString()); - LOG.info ("EMC Networker finished backup job for vm " + vm.getName() + " with saveset Time: " + saveTime); + LOG.info("EMC Networker finished backup job for vm {} with saveset Time: {}", vm, saveTime); BackupVO backup = getClient(vm.getDataCenterId()).registerBackupForVm(vm, backupJobStart, saveTime); if (backup != null) { backup.setBackedUpVolumes(BackupManagerImpl.createVolumeInfoFromVolumes(volumeDao.findByInstance(vm.getId()))); backupDao.persist(backup); return true; } else { - LOG.error("Could not register backup for vm " + vm.getName() + " with saveset Time: " + saveTime); + LOG.error("Could not register backup for vm {} with saveset Time: {}", vm, saveTime); // We need to handle this rare 
situation where backup is successful but can't be registered properly. return false; } @@ -558,8 +558,7 @@ public Map getBackupMetrics(Long zoneId, List getBackupsForVm(VirtualMachine vm) { SimpleDateFormat formatterDateTime = new SimpleDateFormat("yyy-MM-dd'T'HH:mm:ss"); - LOG.debug("Trying to list EMC Networker backups for VM " + vm.getName()); + LOG.debug(String.format("Trying to list EMC Networker backups for VM %s", vm)); try { final HttpResponse response = get("/global/backups/?q=name:" + vm.getName()); checkResponseOK(response); @@ -310,7 +310,7 @@ public ArrayList getBackupsForVm(VirtualMachine vm) { return backupsTaken; } for (final NetworkerBackup backup : networkerBackups.getBackups()) { - LOG.debug("Found Backup " + backup.getId()); + LOG.debug(String.format("Found Backup %s", backup)); // Backups that have expired on the EMC Networker but not removed yet will not be added try { Date backupRetentionTime = formatterDateTime.parse(backup.getRetentionTime()); @@ -345,7 +345,7 @@ public List listPolicies() { return policies; } for (final ProtectionPolicy protectionPolicy : protectionPolicies.getProtectionPolicies()) { - LOG.debug("Found Protection Policy:" + protectionPolicy.getName()); + LOG.debug(String.format("Found Protection Policy: %s", protectionPolicy)); policies.add(new NetworkerBackupOffering(protectionPolicy.getName(), protectionPolicy.getResourceId().getId())); } return policies; diff --git a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java index 4750e3264aac..c120d8bd5999 100644 --- a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java +++ b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java @@ -173,7 +173,7 @@ public boolean assignVMToBackupOffering(final VirtualMachine vm, final BackupOff final String clonedJobName = 
getGuestBackupName(vm.getInstanceName(), vm.getUuid()); if (!client.cloneVeeamJob(parentJob, clonedJobName)) { - logger.error("Failed to clone pre-defined Veeam job (backup offering) for backup offering ID: " + backupOffering.getExternalId() + " but will check the list of jobs again if it was eventually succeeded."); + logger.error("Failed to clone pre-defined Veeam job (backup offering) for backup offering [id: {}, name: {}] but will check the list of jobs again if it was eventually succeeded.", backupOffering.getExternalId(), backupOffering.getName()); } for (final BackupOffering job : client.listJobs()) { @@ -182,7 +182,7 @@ public boolean assignVMToBackupOffering(final VirtualMachine vm, final BackupOff if (BooleanUtils.isTrue(clonedJob.getScheduleConfigured()) && !clonedJob.getScheduleEnabled()) { client.toggleJobSchedule(clonedJob.getId()); } - logger.debug("Veeam job (backup offering) for backup offering ID: " + backupOffering.getExternalId() + " found, now trying to assign the VM to the job."); + logger.debug("Veeam job (backup offering) for backup offering [id: {}, name: {}] found, now trying to assign the VM to the job.", backupOffering.getExternalId(), backupOffering.getName()); final VmwareDatacenter vmwareDC = findVmwareDatacenterForVM(vm); if (client.addVMToVeeamJob(job.getExternalId(), vm.getInstanceName(), vmwareDC.getVcenterHost())) { ((VMInstanceVO) vm).setBackupExternalId(job.getExternalId()); @@ -229,7 +229,7 @@ public boolean takeBackup(final VirtualMachine vm) { public boolean deleteBackup(Backup backup, boolean forced) { VMInstanceVO vm = vmInstanceDao.findByIdIncludingRemoved(backup.getVmId()); if (vm == null) { - throw new CloudRuntimeException(String.format("Could not find any VM associated with the Backup [uuid: %s, externalId: %s].", backup.getUuid(), backup.getExternalId())); + throw new CloudRuntimeException(String.format("Could not find any VM associated with the Backup [uuid: %s, name: %s, externalId: %s].", backup.getUuid(), 
backup.getName(), backup.getExternalId())); } if (!forced) { logger.debug(String.format("Veeam backup provider does not have a safe way to remove a single restore point, which results in all backup chain being removed. " @@ -315,8 +315,8 @@ public Map getBackupMetrics(final Long zoneId, fi } Metric metric = backendMetrics.get(vm.getUuid()); - logger.debug(String.format("Metrics for VM [uuid: %s, name: %s] is [backup size: %s, data size: %s].", vm.getUuid(), - vm.getInstanceName(), metric.getBackupSize(), metric.getDataSize())); + logger.debug("Metrics for VM [{}] is [backup size: {}, data size: {}].", vm, + metric.getBackupSize(), metric.getDataSize()); metrics.put(vm, metric); } return metrics; @@ -331,8 +331,8 @@ private Backup checkAndUpdateIfBackupEntryExistsForRestorePoint(List bac for (final Backup backup : backupsInDb) { if (restorePoint.getId().equals(backup.getExternalId())) { if (metric != null) { - logger.debug(String.format("Update backup with [uuid: %s, external id: %s] from [size: %s, protected size: %s] to [size: %s, protected size: %s].", - backup.getUuid(), backup.getExternalId(), backup.getSize(), backup.getProtectedSize(), metric.getBackupSize(), metric.getDataSize())); + logger.debug("Update backup with [id: {}, uuid: {}, name: {}, external id: {}] from [size: {}, protected size: {}] to [size: {}, protected size: {}].", + backup.getId(), backup.getUuid(), backup.getName(), backup.getExternalId(), backup.getSize(), backup.getProtectedSize(), metric.getBackupSize(), metric.getDataSize()); ((BackupVO) backup).setSize(metric.getBackupSize()); ((BackupVO) backup).setProtectedSize(metric.getDataSize()); @@ -348,7 +348,7 @@ private Backup checkAndUpdateIfBackupEntryExistsForRestorePoint(List bac public void syncBackups(VirtualMachine vm, Backup.Metric metric) { List restorePoints = listRestorePoints(vm); if (CollectionUtils.isEmpty(restorePoints)) { - logger.debug(String.format("Can't find any restore point to VM: [uuid: %s, name: %s].", vm.getUuid(), 
vm.getInstanceName())); + logger.debug("Can't find any restore point to VM: {}", vm); return; } Transaction.execute(new TransactionCallbackNoReturn() { @@ -379,9 +379,8 @@ public void doInTransactionWithoutResult(TransactionStatus status) { backup.setDomainId(vm.getDomainId()); backup.setZoneId(vm.getDataCenterId()); - logger.debug(String.format("Creating a new entry in backups: [uuid: %s, vm_id: %s, external_id: %s, type: %s, date: %s, backup_offering_id: %s, account_id: %s, " - + "domain_id: %s, zone_id: %s].", backup.getUuid(), backup.getVmId(), backup.getExternalId(), backup.getType(), backup.getDate(), - backup.getBackupOfferingId(), backup.getAccountId(), backup.getDomainId(), backup.getZoneId())); + logger.debug("Creating a new entry in backups: [id: {}, uuid: {}, name: {}, vm_id: {}, external_id: {}, type: {}, date: {}, backup_offering_id: {}, account_id: {}, " + + "domain_id: {}, zone_id: {}].", backup.getId(), backup.getUuid(), backup.getName(), backup.getVmId(), backup.getExternalId(), backup.getType(), backup.getDate(), backup.getBackupOfferingId(), backup.getAccountId(), backup.getDomainId(), backup.getZoneId()); backupDao.persist(backup); ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, vm.getAccountId(), EventVO.LEVEL_INFO, EventTypes.EVENT_VM_BACKUP_CREATE, diff --git a/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/VeeamBackupProviderTest.java b/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/VeeamBackupProviderTest.java index 1f2de8f31961..cbfe2fda5929 100644 --- a/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/VeeamBackupProviderTest.java +++ b/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/VeeamBackupProviderTest.java @@ -61,7 +61,7 @@ public void deleteBackupTestExceptionWhenVmIsNull() { backupProvider.deleteBackup(backup, false); } catch (Exception e) { assertEquals(CloudRuntimeException.class, e.getClass()); - String expected = String.format("Could not find any VM 
associated with the Backup [uuid: %s, externalId: %s].", backup.getUuid(), "abc"); + String expected = String.format("Could not find any VM associated with the Backup [uuid: %s, name: null, externalId: %s].", backup.getUuid(), "abc"); assertEquals(expected , e.getMessage()); } } diff --git a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java index 4f1db396b7c4..27b5f175d9e5 100644 --- a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java +++ b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java @@ -142,7 +142,7 @@ public List dedicateZone(final Long zoneId, final Long doma DedicatedResourceVO dedicatedZone = _dedicatedDao.findByZoneId(zoneId); //check if zone is dedicated if (dedicatedZone != null) { - logger.error("Zone " + dc.getName() + " is already dedicated"); + logger.error(String.format("Zone %s is already dedicated", dc)); throw new CloudRuntimeException("Zone " + dc.getName() + " is already dedicated"); } @@ -161,7 +161,7 @@ public List dedicateZone(final Long zoneId, final Long doma if (dPod.getAccountId().equals(accountId)) { podsToRelease.add(dPod); } else { - logger.error("Pod " + pod.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); + logger.error(String.format("Pod %s under this Zone %s is dedicated to different account/domain", pod, dc)); throw new CloudRuntimeException("Pod " + pod.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); } } else { @@ -187,7 +187,7 @@ public List dedicateZone(final Long zoneId, final Long doma if (dCluster.getAccountId().equals(accountId)) { clustersToRelease.add(dCluster); } else { - logger.error("Cluster " + cluster.getName() + " under this Zone " + 
dc.getName() + " is dedicated to different account/domain"); + logger.error(String.format("Cluster %s under this Zone %s is dedicated to different account/domain", cluster, dc)); throw new CloudRuntimeException("Cluster " + cluster.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); } @@ -214,7 +214,7 @@ public List dedicateZone(final Long zoneId, final Long doma if (dHost.getAccountId().equals(accountId)) { hostsToRelease.add(dHost); } else { - logger.error("Host " + host.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); + logger.error(String.format("Host %s under this Zone %s is dedicated to different account/domain", host, dc)); throw new CloudRuntimeException("Host " + host.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); } } else { @@ -292,7 +292,7 @@ public List dedicatePod(final Long podId, final Long domain DedicatedResourceVO dedicatedZoneOfPod = _dedicatedDao.findByZoneId(pod.getDataCenterId()); //check if pod is dedicated if (dedicatedPod != null) { - logger.error("Pod " + pod.getName() + " is already dedicated"); + logger.error(String.format("Pod %s is already dedicated", pod)); throw new CloudRuntimeException("Pod " + pod.getName() + " is already dedicated"); } @@ -302,7 +302,7 @@ public List dedicatePod(final Long podId, final Long domain if (dedicatedZoneOfPod.getAccountId() != null || (accountId == null && !domainIdInChildreanList) || (accountId != null && !(dedicatedZoneOfPod.getDomainId().equals(domainId) || domainIdInChildreanList))) { DataCenterVO zone = _zoneDao.findById(pod.getDataCenterId()); - logger.error("Cannot dedicate Pod. Its zone is already dedicated"); + logger.error(String.format("Cannot dedicate Pod. 
Its zone %s is already dedicated", zone)); throw new CloudRuntimeException("Pod's Zone " + zone.getName() + " is already dedicated"); } } @@ -323,7 +323,7 @@ public List dedicatePod(final Long podId, final Long domain if (dCluster.getAccountId().equals(accountId)) { clustersToRelease.add(dCluster); } else { - logger.error("Cluster " + cluster.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); + logger.error(String.format("Cluster %s under this Pod %s is dedicated to different account/domain", cluster, pod)); throw new CloudRuntimeException("Cluster " + cluster.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); } @@ -350,7 +350,7 @@ public List dedicatePod(final Long podId, final Long domain if (dHost.getAccountId().equals(accountId)) { hostsToRelease.add(dHost); } else { - logger.error("Host " + host.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); + logger.error(String.format("Host %s under this Pod %s is dedicated to different account/domain", host, pod)); throw new CloudRuntimeException("Host " + host.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); } } else { @@ -421,7 +421,7 @@ public List dedicateCluster(final Long clusterId, final Lon //check if cluster is dedicated if (dedicatedCluster != null) { - logger.error("Cluster " + cluster.getName() + " is already dedicated"); + logger.error(String.format("Cluster %s is already dedicated", cluster)); throw new CloudRuntimeException("Cluster " + cluster.getName() + " is already dedicated"); } @@ -430,8 +430,8 @@ public List dedicateCluster(final Long clusterId, final Lon //can dedicate a cluster to an account/domain if pod is dedicated to parent-domain if (dedicatedPodOfCluster.getAccountId() != null || (accountId == null && !domainIdInChildreanList) || (accountId != null && !(dedicatedPodOfCluster.getDomainId().equals(domainId) || 
domainIdInChildreanList))) { - logger.error("Cannot dedicate Cluster. Its Pod is already dedicated"); HostPodVO pod = _podDao.findById(cluster.getPodId()); + logger.error(String.format("Cannot dedicate Cluster %s. Its Pod %s is already dedicated", cluster, pod)); throw new CloudRuntimeException("Cluster's Pod " + pod.getName() + " is already dedicated"); } } @@ -441,8 +441,8 @@ public List dedicateCluster(final Long clusterId, final Lon //can dedicate a cluster to an account/domain if zone is dedicated to parent-domain if (dedicatedZoneOfCluster.getAccountId() != null || (accountId == null && !domainIdInChildreanList) || (accountId != null && !(dedicatedZoneOfCluster.getDomainId().equals(domainId) || domainIdInChildreanList))) { - logger.error("Cannot dedicate Cluster. Its zone is already dedicated"); DataCenterVO zone = _zoneDao.findById(cluster.getDataCenterId()); + logger.error(String.format("Cannot dedicate Cluster %s. Its zone %s is already dedicated", cluster, zone)); throw new CloudRuntimeException("Cluster's Zone " + zone.getName() + " is already dedicated"); } } @@ -463,7 +463,7 @@ public List dedicateCluster(final Long clusterId, final Lon if (dHost.getAccountId().equals(accountId)) { hostsToRelease.add(dHost); } else { - logger.error("Cannot dedicate Cluster " + cluster.getName() + " to account" + accountName); + logger.error(String.format("Cannot dedicate Cluster %s to account %s", cluster, accountName)); throw new CloudRuntimeException("Cannot dedicate Cluster " + cluster.getName() + " to account" + accountName); } } else { @@ -536,7 +536,7 @@ public List dedicateHost(final Long hostId, final Long doma DedicatedResourceVO dedicatedZoneOfHost = _dedicatedDao.findByZoneId(host.getDataCenterId()); if (dedicatedHost != null) { - logger.error("Host " + host.getName() + " is already dedicated"); + logger.error(String.format("Host %s is already dedicated", host)); throw new CloudRuntimeException("Host " + host.getName() + " is already dedicated"); } @@ -546,7 
+546,7 @@ public List dedicateHost(final Long hostId, final Long doma if (dedicatedClusterOfHost.getAccountId() != null || (accountId == null && !domainIdInChildreanList) || (accountId != null && !(dedicatedClusterOfHost.getDomainId().equals(domainId) || domainIdInChildreanList))) { ClusterVO cluster = _clusterDao.findById(host.getClusterId()); - logger.error("Host's Cluster " + cluster.getName() + " is already dedicated"); + logger.error(String.format("Host's Cluster %s is already dedicated", cluster)); throw new CloudRuntimeException("Host's Cluster " + cluster.getName() + " is already dedicated"); } } @@ -557,7 +557,7 @@ public List dedicateHost(final Long hostId, final Long doma if (dedicatedPodOfHost.getAccountId() != null || (accountId == null && !domainIdInChildreanList) || (accountId != null && !(dedicatedPodOfHost.getDomainId().equals(domainId) || domainIdInChildreanList))) { HostPodVO pod = _podDao.findById(host.getPodId()); - logger.error("Host's Pod " + pod.getName() + " is already dedicated"); + logger.error(String.format("Host's Pod %s is already dedicated", pod)); throw new CloudRuntimeException("Host's Pod " + pod.getName() + " is already dedicated"); } } @@ -568,7 +568,7 @@ public List dedicateHost(final Long hostId, final Long doma if (dedicatedZoneOfHost.getAccountId() != null || (accountId == null && !domainIdInChildreanList) || (accountId != null && !(dedicatedZoneOfHost.getDomainId().equals(domainId) || domainIdInChildreanList))) { DataCenterVO zone = _zoneDao.findById(host.getDataCenterId()); - logger.error("Host's Data Center " + zone.getName() + " is already dedicated"); + logger.error(String.format("Host's Data Center %s is already dedicated", zone)); throw new CloudRuntimeException("Host's Data Center " + zone.getName() + " is already dedicated"); } } @@ -576,7 +576,7 @@ public List dedicateHost(final Long hostId, final Long doma List childDomainIds = getDomainChildIds(domainId); childDomainIds.add(domainId); - 
checkHostSuitabilityForExplicitDedication(accountId, childDomainIds, hostId); + checkHostSuitabilityForExplicitDedication(accountId, childDomainIds, host); final Long accountIdFinal = accountId; return Transaction.execute(new TransactionCallback>() { @@ -662,22 +662,22 @@ private List getVmsOnHost(long hostId) { return vms; } - private boolean checkHostSuitabilityForExplicitDedication(Long accountId, List domainIds, long hostId) { + private boolean checkHostSuitabilityForExplicitDedication(Long accountId, List domainIds, Host host) { boolean suitable = true; - List allVmsOnHost = getVmsOnHost(hostId); + List allVmsOnHost = getVmsOnHost(host.getId()); if (accountId != null) { for (UserVmVO vm : allVmsOnHost) { if (vm.getAccountId() != accountId) { - logger.info("Host " + vm.getHostId() + " found to be unsuitable for explicit dedication as it is " + "running instances of another account"); - throw new CloudRuntimeException("Host " + hostId + " found to be unsuitable for explicit dedication as it is " + + logger.info(String.format("Host %s found to be unsuitable for explicit dedication as it is running instances of another account", host)); + throw new CloudRuntimeException("Host " + host.getUuid() + " found to be unsuitable for explicit dedication as it is " + "running instances of another account"); } } } else { for (UserVmVO vm : allVmsOnHost) { if (!domainIds.contains(vm.getDomainId())) { - logger.info("Host " + vm.getHostId() + " found to be unsuitable for explicit dedication as it is " + "running instances of another domain"); - throw new CloudRuntimeException("Host " + hostId + " found to be unsuitable for explicit dedication as it is " + + logger.info(String.format("Host %s found to be unsuitable for explicit dedication as it is running instances of another domain", host)); + throw new CloudRuntimeException("Host " + host.getUuid() + " found to be unsuitable for explicit dedication as it is " + "running instances of another domain"); } } @@ -688,7 +688,7 @@ 
private boolean checkHostSuitabilityForExplicitDedication(Long accountId, List domainIds, List hosts) { boolean suitable = true; for (HostVO host : hosts) { - checkHostSuitabilityForExplicitDedication(accountId, domainIds, host.getId()); + checkHostSuitabilityForExplicitDedication(accountId, domainIds, host); } return suitable; } @@ -939,7 +939,7 @@ public boolean releaseDedicatedResource(final Long zoneId, Long podId, Long clus public void doInTransactionWithoutResult(TransactionStatus status) { Long resourceId = resourceFinal.getId(); if (!_dedicatedDao.remove(resourceId)) { - throw new CloudRuntimeException("Failed to delete Resource " + resourceId); + throw new CloudRuntimeException(String.format("Failed to delete Resource %s", resourceFinal)); } if (zoneId != null) { // remove the domainId set in zone diff --git a/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java b/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java index bd1bcf061013..b971b3b8596a 100644 --- a/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java +++ b/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java @@ -156,13 +156,16 @@ private boolean checkHostSuitabilityForImplicitDedication(Long accountId, List listHostsByTags(Host.Type type, long dcId, Long podId, Lo if (logger.isDebugEnabled()) { logger.debug(String.format("Found %d hosts %s with type: %s, zone ID: %d, pod ID: %d, cluster ID: %s, offering host tag(s): %s, template tag: %s", taggedHosts.size(), - (taggedHosts.isEmpty() ? "" : String.format("(%s)", StringUtils.join(taggedHosts.stream().map(HostVO::getId).toArray(), ","))), + (taggedHosts.isEmpty() ? 
"" : String.format("(%s)", StringUtils.join(taggedHosts.stream().map(HostVO::toString).toArray(), ","))), type.name(), dcId, podId, clusterId, offeringHostTag, templateTag)); } return taggedHosts; @@ -139,19 +139,19 @@ private List findSuitableHosts(VirtualMachineProfile vmProfile, Deployment } if (avoid.shouldAvoid(host)) { if (logger.isDebugEnabled()) { - logger.debug("Host name: " + host.getName() + ", hostId: " + host.getId() + " is in avoid set, skipping this and trying other available hosts"); + logger.debug(String.format("Host %s is in avoid set, skipping this and trying other available hosts", host)); } continue; } Pair cpuCapabilityAndCapacity = capacityManager.checkIfHostHasCpuCapabilityAndCapacity(host, offering, considerReservedCapacity); if (!cpuCapabilityAndCapacity.first() || !cpuCapabilityAndCapacity.second()) { if (logger.isDebugEnabled()) { - logger.debug("Not using host " + host.getId() + "; host has cpu capability? " + cpuCapabilityAndCapacity.first() + ", host has capacity?" + cpuCapabilityAndCapacity.second()); + logger.debug(String.format("Not using host %s; host has cpu capability? %s, host has capacity? 
%s", host, cpuCapabilityAndCapacity.first(), cpuCapabilityAndCapacity.second())); } continue; } if (logger.isDebugEnabled()) { - logger.debug("Found a suitable host, adding to list: " + host.getId()); + logger.debug(String.format("Found a suitable host, adding to list: %s", host)); } suitableHosts.add(host); } diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalDiscoverer.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalDiscoverer.java index 321369b24b9e..f918f66941e9 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalDiscoverer.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalDiscoverer.java @@ -258,7 +258,7 @@ public DeleteHostAnswer deleteHost(HostVO host, boolean isForced, boolean isForc List deadVms = _vmDao.listByLastHostId(host.getId()); for (VMInstanceVO vm : deadVms) { if (vm.getState() == State.Running || vm.getHostId() != null) { - throw new CloudRuntimeException("VM " + vm.getId() + "is still running on host " + host.getId()); + throw new CloudRuntimeException(String.format("VM %s is still running on host %s", vm, host)); } _vmDao.remove(vm.getId()); } diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java index 0aba829f3cc7..1b2691c43c3e 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java @@ -80,7 +80,7 @@ public DeployDestination plan(VirtualMachineProfile vmProfile, DeploymentPlan pl DataCenter dc = _dcDao.findById(h.getDataCenterId()); Pod pod = _podDao.findById(h.getPodId()); Cluster c = _clusterDao.findById(h.getClusterId()); - logger.debug("Start baremetal vm " + vm.getId() + " on last 
stayed host " + h.getId()); + logger.debug(String.format("Start baremetal vm %s on last stayed host %s", vm, h)); return new DeployDestination(dc, pod, c, h); } @@ -124,7 +124,7 @@ public DeployDestination plan(VirtualMachineProfile vmProfile, DeploymentPlan pl if (haVmTag == null) { hosts = _resourceMgr.listAllUpAndEnabledNonHAHosts(Host.Type.Routing, cluster.getId(), cluster.getPodId(), cluster.getDataCenterId()); } else { - logger.warn("Cannot find HA host with tag " + haVmTag + " in cluster id=" + cluster.getId() + ", pod id=" + cluster.getPodId() + ", data center id=" + + logger.warn("Cannot find HA host with tag " + haVmTag + " in cluster " + cluster + ", pod id=" + cluster.getPodId() + ", data center id=" + cluster.getDataCenterId()); return null; } @@ -136,7 +136,7 @@ public DeployDestination plan(VirtualMachineProfile vmProfile, DeploymentPlan pl Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); if (_capacityMgr.checkIfHostHasCapacity(h, cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { - logger.debug("Find host " + h.getId() + " has enough capacity"); + logger.debug(String.format("Find host %s has enough capacity", h)); DataCenter dc = _dcDao.findById(h.getDataCenterId()); Pod pod = _podDao.findById(h.getPodId()); return new DeployDestination(dc, pod, cluster, h); diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalManagerImpl.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalManagerImpl.java index bf991b77e1cb..d90ea6c37312 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalManagerImpl.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalManagerImpl.java @@ -98,10 +98,10 @@ public boolean postStateTransitionEvent(StateMachine2.Transition t if (newState == State.Starting) { host.setDetail("vmName", vo.getInstanceName()); - 
logger.debug("Add vmName " + host.getDetail("vmName") + " to host " + host.getId() + " details"); + logger.debug(String.format("Add vmName %s to host %s details", host.getDetail("vmName"), host)); } else { if (host.getDetail("vmName") != null && host.getDetail("vmName").equalsIgnoreCase(vo.getInstanceName())) { - logger.debug("Remove vmName " + host.getDetail("vmName") + " from host " + host.getId() + " details"); + logger.debug(String.format("Remove vmName %s from host %s details", host.getDetail("vmName"), host)); host.getDetails().remove("vmName"); } } @@ -142,13 +142,13 @@ public void notifyProvisionDone(BaremetalProvisionDoneNotificationCmd cmd) { } if (State.Starting != vm.getState()) { - throw new CloudRuntimeException(String.format("baremetal instance[name:%s, state:%s] is not in state of Starting", vmName, vm.getState())); + throw new CloudRuntimeException(String.format("baremetal instance %s [state:%s] is not in state of Starting", vm, vm.getState())); } vm.setState(State.Running); vm.setLastHostId(vm.getHostId()); vmDao.update(vm.getId(), vm); - logger.debug(String.format("received baremetal provision done notification for vm[id:%s name:%s] running on host[mac:%s, ip:%s]", - vm.getId(), vm.getInstanceName(), host.getPrivateMacAddress(), host.getPrivateIpAddress())); + logger.debug(String.format("received baremetal provision done notification for vm %s running on host %s [mac:%s, ip:%s]", + vm, host, host.getPrivateMacAddress(), host.getPrivateIpAddress())); } } diff --git a/plugins/hypervisors/hyperv/src/main/java/com/cloud/ha/HypervInvestigator.java b/plugins/hypervisors/hyperv/src/main/java/com/cloud/ha/HypervInvestigator.java index d820fd5b6d30..3d79b9efdd13 100644 --- a/plugins/hypervisors/hyperv/src/main/java/com/cloud/ha/HypervInvestigator.java +++ b/plugins/hypervisors/hyperv/src/main/java/com/cloud/ha/HypervInvestigator.java @@ -66,7 +66,7 @@ public Status isAgentAlive(Host agent) { return answer.getResult() ? 
Status.Down : Status.Up; } } catch (Exception e) { - logger.debug("Failed to send command to host: " + neighbor.getId(), e); + logger.debug(String.format("Failed to send command to host: %s", neighbor), e); } } diff --git a/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/discoverer/HypervServerDiscoverer.java b/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/discoverer/HypervServerDiscoverer.java index 283f4dc0c96c..a5947238bf6d 100644 --- a/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/discoverer/HypervServerDiscoverer.java +++ b/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/discoverer/HypervServerDiscoverer.java @@ -136,7 +136,7 @@ public final void processConnect(final Host agent, final StartupCommand cmd, fin } if (logger.isDebugEnabled()) { - logger.debug("Setting up host " + agentId); + logger.debug(String.format("Setting up host %s", agent)); } HostEnvironment env = new HostEnvironment(); @@ -161,14 +161,14 @@ public final void processConnect(final Host agent, final StartupCommand cmd, fin if (reason == null) { reason = " details were null"; } - logger.warn("Unable to setup agent " + agentId + " due to " + reason); + logger.warn(String.format("Unable to setup agent %s due to %s", agent, reason)); } // Error handling borrowed from XcpServerDiscoverer, may need to be // updated. 
} catch (AgentUnavailableException e) { - logger.warn("Unable to setup agent " + agentId + " because it became unavailable.", e); + logger.warn(String.format("Unable to setup agent %s because it became unavailable.", agent), e); } catch (OperationTimedoutException e) { - logger.warn("Unable to setup agent " + agentId + " because it timed out", e); + logger.warn(String.format("Unable to setup agent %s because it timed out", agent), e); } throw new ConnectionException(true, "Reinitialize agent after setup."); } @@ -256,7 +256,7 @@ public final Map> find(final long } logger.info("Creating" + HypervDirectConnectResource.class.getName() + " HypervDirectConnectResource for zone/pod/cluster " + dcId + "/" + podId + "/" + - clusterId); + cluster); // Some Hypervisors organise themselves in pools. // The startup command tells us what pool they are using. @@ -391,7 +391,7 @@ public final HostVO createHostVOForDirectConnectAgent(final HostVO host, final S return null; } - logger.info("Host: " + host.getName() + " connected with hypervisor type: " + HypervisorType.Hyperv + ". Checking CIDR..."); + logger.info(String.format("Host: %s connected with hypervisor type: %s. 
Checking CIDR...", host, HypervisorType.Hyperv)); HostPodVO pod = _podDao.findById(host.getPodId()); DataCenterVO dc = _dcDao.findById(host.getDataCenterId()); diff --git a/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/guru/HypervGuru.java b/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/guru/HypervGuru.java index c00ee70bf135..d488ee2058fd 100644 --- a/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/guru/HypervGuru.java +++ b/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/guru/HypervGuru.java @@ -123,7 +123,7 @@ else if (nicProfile.getTrafficType() == TrafficType.Control) { String mac = networkModel.getNextAvailableMacAddressInNetwork(networkId); nicTo.setMac(mac); } catch (InsufficientAddressCapacityException e) { - throw new CloudRuntimeException("unable to allocate mac address on network: " + networkId); + throw new CloudRuntimeException(String.format("unable to allocate mac address on network: %s", network.getUuid())); } nicTo.setDns1(profile.getIPv4Dns1()); nicTo.setDns2(profile.getIPv4Dns2()); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index fe6be10f9b84..10cec7025139 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -278,13 +278,13 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { String path = derivePath(primaryStore, destData, details); if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { - logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + logger.warn("Failed to connect physical disk at path: {}, in 
storage pool [id: {}, name: {}]", path, primaryStore.getUuid(), primaryStore.getName()); } primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, path != null ? path : destTempl.getUuid(), primaryPool, cmd.getWaitInMillSeconds()); if (!storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path)) { - logger.warn("Failed to disconnect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + logger.warn("Failed to disconnect physical disk at path: {}, in storage pool [id: {}, name: {}]", path, primaryStore.getUuid(), primaryStore.getName()); } } else { primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, UUID.randomUUID().toString(), primaryPool, cmd.getWaitInMillSeconds()); @@ -427,17 +427,19 @@ public Answer cloneVolumeFromBaseTemplate(final CopyCommand cmd) { String path = derivePath(primaryStore, destData, details); if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), templatePath, details)) { - logger.warn("Failed to connect base template volume at path: " + templatePath + ", in storage pool id: " + primaryStore.getUuid()); + logger.warn("Failed to connect base template volume [id: {}, name: {}, path:" + + " {}], in storage pool [id: {}, name: {}]", template.getUuid(), + template.getName(), templatePath, primaryStore.getUuid(), primaryStore.getName()); } BaseVol = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), templatePath); if (BaseVol == null) { - logger.debug("Failed to get the physical disk for base template volume at path: " + templatePath); - throw new CloudRuntimeException("Failed to get the physical disk for base template volume at path: " + templatePath); + logger.debug("Failed to get the physical disk for base template volume [id: {}, name: {}, path: {}]", template.getUuid(), template.getName(), templatePath); + throw new CloudRuntimeException(String.format("Failed to get the physical disk for base template volume 
[id: %s, name: %s, path: %s]", template.getUuid(), template.getName(), templatePath)); } if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { - logger.warn("Failed to connect new volume at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + logger.warn("Failed to connect new volume at path: {}, in storage pool [id: {}, name: {}]", path, primaryStore.getUuid(), primaryStore.getName()); } BaseVol.setDispName(template.getName()); @@ -1096,10 +1098,10 @@ private void deleteSnapshotOnPrimary(final CopyCommand cmd, final SnapshotObject try { Files.deleteIfExists(Paths.get(snapshotPath)); } catch (IOException ex) { - logger.error(String.format("Failed to delete snapshot [%s] on primary storage [%s].", snapshotPath, primaryPool.getUuid()), ex); + logger.error("Failed to delete snapshot [{}] on primary storage [{}].", snapshot.getId(), primaryPool.getUuid(), ex); } } else { - logger.debug(String.format("This backup is temporary, not deleting snapshot [%s] on primary storage [%s]", snapshotPath, primaryPool.getUuid())); + logger.debug("This backup is temporary, not deleting snapshot [{}] on primary storage [{}]", snapshot.getId(), primaryPool.getUuid()); } } @@ -1551,14 +1553,15 @@ public Answer attachVolume(final AttachCommand cmd) { return new AttachAnswer(disk); } catch (final LibvirtException e) { - logger.debug("Failed to attach volume: " + vol.getPath() + ", due to ", e); + logger.debug(String.format("Failed to attach volume [id: %d, uuid: %s, name: %s, path: %s], due to ", + vol.getId(), vol.getUuid(), vol.getName(), vol.getPath()), e); storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath()); return new AttachAnswer(e.toString()); } catch (final InternalErrorException e) { - logger.debug("Failed to attach volume: " + vol.getPath() + ", due to ", e); + logger.debug(String.format("Failed to attach volume [id: %d, uuid: %s, name: %s, path: %s], due to ", 
vol.getId(), vol.getUuid(), vol.getName(), vol.getPath()), e); return new AttachAnswer(e.toString()); } catch (final CloudRuntimeException e) { - logger.debug("Failed to attach volume: " + vol.getPath() + ", due to ", e); + logger.debug(String.format("Failed to attach volume: [id: %d, uuid: %s, name: %s, path: %s], due to ", vol.getId(), vol.getUuid(), vol.getName(), vol.getPath()), e); return new AttachAnswer(e.toString()); } finally { vol.clearPassphrase(); @@ -1588,14 +1591,8 @@ public Answer dettachVolume(final DettachCommand cmd) { storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath()); return new DettachAnswer(disk); - } catch (final LibvirtException e) { - logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e); - return new DettachAnswer(e.toString()); - } catch (final InternalErrorException e) { - logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e); - return new DettachAnswer(e.toString()); - } catch (final CloudRuntimeException e) { - logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e); + } catch (final LibvirtException | InternalErrorException | CloudRuntimeException e) { + logger.debug(String.format("Failed to detach volume [id: %d, uuid: %s, name: %s, path: %s], due to ", vol.getId(), vol.getUuid(), vol.getName(), vol.getPath()), e); return new DettachAnswer(e.toString()); } finally { vol.clearPassphrase(); @@ -2134,7 +2131,7 @@ public Answer deleteVolume(final DeleteCommand cmd) { try { pool.getPhysicalDisk(vol.getPath()); } catch (final Exception e) { - logger.debug("can't find volume: " + vol.getPath() + ", return true"); + logger.debug(String.format("can't find volume: %s, return true", vol)); return new Answer(null); } pool.deletePhysicalDisk(vol.getPath(), vol.getFormat()); @@ -2429,7 +2426,7 @@ public Answer handleDownloadTemplateToPrimaryStorage(DirectDownloadCommand cmd) logger.debug("Checking for free space on the host for 
downloading the template with physical size: " + templateSize + " and virtual size: " + cmd.getTemplateSize()); if (!isEnoughSpaceForDownloadTemplateOnTemporaryLocation(templateSize)) { - String msg = "Not enough space on the defined temporary location to download the template " + cmd.getTemplateId(); + String msg = String.format("Not enough space on the defined temporary location to download the template %s with id %d", cmd.getDestData(), cmd.getTemplateId()); logger.error(msg); return new DirectDownloadAnswer(false, msg, true); } @@ -2452,7 +2449,8 @@ public Answer handleDownloadTemplateToPrimaryStorage(DirectDownloadCommand cmd) String destTemplatePath = (destTemplate != null) ? destTemplate.getPath() : null; if (!storagePoolMgr.connectPhysicalDisk(pool.getPoolType(), pool.getUuid(), destTemplatePath, null)) { - logger.warn("Unable to connect physical disk at path: " + destTemplatePath + ", in storage pool id: " + pool.getUuid()); + logger.warn(String.format("Unable to connect physical disk at path: %s, in storage pool [id: %d, uuid: %s, name: %s, path: %s]", + destTemplatePath, pool.getId(), pool.getUuid(), pool.getName(), pool.getPath())); } template = storagePoolMgr.createPhysicalDiskFromDirectDownloadTemplate(tempFilePath, destTemplatePath, destPool, cmd.getFormat(), cmd.getWaitInMillSeconds()); @@ -2465,7 +2463,7 @@ public Answer handleDownloadTemplateToPrimaryStorage(DirectDownloadCommand cmd) try { Files.deleteIfExists(Path.of(templatePath)); } catch (IOException ioException) { - logger.warn("Unable to remove file [{}]; consider removing it manually.", templatePath, ioException); + logger.warn("Unable to remove file [name: {}, path: {}]; consider removing it manually.", template.getName(), templatePath, ioException); } logger.error("The downloaded file [{}] is not a valid QCOW2.", templatePath, e); @@ -2474,10 +2472,10 @@ public Answer handleDownloadTemplateToPrimaryStorage(DirectDownloadCommand cmd) } if 
(!storagePoolMgr.disconnectPhysicalDisk(pool.getPoolType(), pool.getUuid(), destTemplatePath)) { - logger.warn("Unable to disconnect physical disk at path: " + destTemplatePath + ", in storage pool id: " + pool.getUuid()); + logger.warn(String.format("Unable to disconnect physical disk at path: %s, in storage pool [id: %d, uuid: %s, name: %s, path: %s]", destTemplatePath, pool.getId(), pool.getUuid(), pool.getName(), pool.getPath())); } } catch (CloudRuntimeException e) { - logger.warn("Error downloading template " + cmd.getTemplateId() + " due to: " + e.getMessage()); + logger.warn(String.format("Error downloading template %s with id %d due to: %s", cmd.getDestData(), cmd.getTemplateId(), e.getMessage())); return new DirectDownloadAnswer(false, "Unable to download template: " + e.getMessage(), true); } catch (IllegalArgumentException e) { return new DirectDownloadAnswer(false, "Unable to create direct downloader: " + e.getMessage(), true); @@ -2503,18 +2501,25 @@ public Answer copyVolumeFromPrimaryToPrimary(CopyCommand cmd) { KVMStoragePool destPool = null; try { - logger.debug("Copying src volume (id: " + srcVol.getId() + ", format: " + srcFormat + ", path: " + srcVolumePath + ", primary storage: [id: " + srcPrimaryStore.getId() + ", type: " + srcPrimaryStore.getPoolType() + "]) to dest volume (id: " + - destVol.getId() + ", format: " + destFormat + ", path: " + destVolumePath + ", primary storage: [id: " + destPrimaryStore.getId() + ", type: " + destPrimaryStore.getPoolType() + "])."); + logger.debug(String.format("Copying src volume (id: %d, uuid: %s, name: %s, format:" + " %s, path: %s, primary storage: [id: %d, uuid: %s, name: %s, type: " + "%s]) to dest volume (id: %d, uuid: %s, name: %s, format: %s, path: " + "%s, primary storage: [id: %d, uuid: %s, name: %s, type: %s]).", + srcVol.getId(), srcVol.getUuid(), srcVol.getName(), srcFormat, srcVolumePath, + srcPrimaryStore.getId(), srcPrimaryStore.getUuid(), srcPrimaryStore.getName(), + 
srcPrimaryStore.getPoolType(), destVol.getId(), destVol.getUuid(), destVol.getName(), + destFormat, destVolumePath, destPrimaryStore.getId(), destPrimaryStore.getUuid(), + destPrimaryStore.getName(), destPrimaryStore.getPoolType())); if (srcPrimaryStore.isManaged()) { if (!storagePoolMgr.connectPhysicalDisk(srcPrimaryStore.getPoolType(), srcPrimaryStore.getUuid(), srcVolumePath, srcPrimaryStore.getDetails())) { - logger.warn("Failed to connect src volume at path: " + srcVolumePath + ", in storage pool id: " + srcPrimaryStore.getUuid()); + logger.warn(String.format("Failed to connect src volume %s, in storage pool %s", srcVol, srcPrimaryStore)); } } final KVMPhysicalDisk volume = storagePoolMgr.getPhysicalDisk(srcPrimaryStore.getPoolType(), srcPrimaryStore.getUuid(), srcVolumePath); if (volume == null) { - logger.debug("Failed to get physical disk for volume: " + srcVolumePath); + logger.debug("Failed to get physical disk for volume: " + srcVol); throw new CloudRuntimeException("Failed to get physical disk for volume at path: " + srcVolumePath); } @@ -2525,7 +2530,7 @@ public Answer copyVolumeFromPrimaryToPrimary(CopyCommand cmd) { String destVolumeName = null; if (destPrimaryStore.isManaged()) { if (!storagePoolMgr.connectPhysicalDisk(destPrimaryStore.getPoolType(), destPrimaryStore.getUuid(), destVolumePath, destPrimaryStore.getDetails())) { - logger.warn("Failed to connect dest volume at path: " + destVolumePath + ", in storage pool id: " + destPrimaryStore.getUuid()); + logger.warn("Failed to connect dest volume {}, in storage pool {}", destVol, destPrimaryStore); } destVolumeName = derivePath(destPrimaryStore, destData, destPrimaryStore.getDetails()); } else { @@ -2544,7 +2549,8 @@ public Answer copyVolumeFromPrimaryToPrimary(CopyCommand cmd) { storagePoolMgr.copyPhysicalDisk(volume, destVolumeName, destPool, cmd.getWaitInMillSeconds()); } } catch (Exception e) { // Any exceptions while copying the disk, should send failed answer with the error message - String 
errMsg = String.format("Failed to copy volume: %s to dest storage: %s, due to %s", srcVol.getName(), destPrimaryStore.getName(), e.toString()); + String errMsg = String.format("Failed to copy volume [uuid: %s, name: %s] to dest storage [id: %s, name: %s], due to %s", + srcVol.getUuid(), srcVol.getName(), destPrimaryStore.getUuid(), destPrimaryStore.getName(), e.toString()); logger.debug(errMsg, e); throw new CloudRuntimeException(errMsg); } finally { diff --git a/plugins/hypervisors/simulator/src/main/java/com/cloud/ha/SimulatorInvestigator.java b/plugins/hypervisors/simulator/src/main/java/com/cloud/ha/SimulatorInvestigator.java index 56a5b08810be..7114a8411578 100644 --- a/plugins/hypervisors/simulator/src/main/java/com/cloud/ha/SimulatorInvestigator.java +++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/ha/SimulatorInvestigator.java @@ -75,7 +75,7 @@ public Status isAgentAlive(Host agent) { return answer.getResult() ? Status.Up : Status.Down; } } catch (Exception e) { - logger.debug("Failed to send command to host: " + neighbor.getId()); + logger.debug(String.format("Failed to send command to host: %s", neighbor)); } } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VmwareVmImplementer.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VmwareVmImplementer.java index 461e141fa3db..3885e06e7408 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VmwareVmImplementer.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VmwareVmImplementer.java @@ -242,7 +242,7 @@ private void configureDomainRouterNicsAndDetails(VirtualMachineProfile vm, Virtu String mac = networkModel.getNextAvailableMacAddressInNetwork(networkId); nicTo.setMac(mac); } catch (InsufficientAddressCapacityException e) { - throw new CloudRuntimeException("unable to allocate mac address on network: " + networkId); + throw new CloudRuntimeException(String.format("unable to allocate mac 
address on network %s with id %d", network, networkId)); } nicTo.setDns1(publicNicProfile.getIPv4Dns1()); nicTo.setDns2(publicNicProfile.getIPv4Dns2()); diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java index 580d44a09d61..1be381dcd54c 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java @@ -176,7 +176,7 @@ public VmwareServerDiscoverer() { if (hosts != null && hosts.size() > 0) { int maxHostsPerCluster = _hvCapabilitiesDao.getMaxHostsPerCluster(hosts.get(0).getHypervisorType(), hosts.get(0).getHypervisorVersion()); if (hosts.size() >= maxHostsPerCluster) { - String msg = "VMware cluster " + cluster.getName() + " is too big to add new host, current size: " + hosts.size() + ", max. size: " + maxHostsPerCluster; + String msg = String.format("VMware cluster %s is too big to add new host, current size: %d, max. 
size: %d", cluster, hosts.size(), maxHostsPerCluster); logger.error(msg); throw new DiscoveredWithErrorException(msg); } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageProcessor.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageProcessor.java index 1e260b4f99b5..c99d7d4d7075 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageProcessor.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageProcessor.java @@ -493,8 +493,8 @@ private Pair copyTemplateFromSecondaryToPrimary(VmwareHy boolean createSnapshot, String nfsVersion, String configuration) throws Exception { String secondaryMountPoint = mountService.getMountPoint(secondaryStorageUrl, nfsVersion); - logger.info(String.format("Init copy of template [name: %s, path in secondary storage: %s, configuration: %s] in secondary storage [url: %s, mount point: %s] to primary storage.", - templateName, templatePathAtSecondaryStorage, configuration, secondaryStorageUrl, secondaryMountPoint)); + logger.info(String.format("Init copy of template [uuid: %s, name: %s, path in secondary storage: %s, configuration: %s] in secondary storage [url: %s, mount point: %s] to primary storage.", + templateUuid, templateName, templatePathAtSecondaryStorage, configuration, secondaryStorageUrl, secondaryMountPoint)); String srcOVAFileName = VmwareStorageLayoutHelper.getTemplateOnSecStorageFilePath(secondaryMountPoint, templatePathAtSecondaryStorage, templateName, @@ -2534,7 +2534,7 @@ public Answer deleteVolume(DeleteCommand cmd) { if (vmMo != null) { if (logger.isInfoEnabled()) { if (deployAsIs) { - logger.info("Destroying root volume " + vol.getPath() + " of deploy-as-is VM " + vmName); + logger.info(String.format("Destroying root volume %s of deploy-as-is VM %s", vol, vmName)); } else { logger.info("Destroy root volume and VM itself. 
vmName " + vmName); } @@ -2585,7 +2585,7 @@ public Answer deleteVolume(DeleteCommand cmd) { } } else if (deployAsIs) { if (logger.isInfoEnabled()) { - logger.info("Destroying root volume " + vol.getPath() + " of already removed deploy-as-is VM " + vmName); + logger.info(String.format("Destroying root volume %s of already removed deploy-as-is VM %s", vol, vmName)); } // The disks of the deploy-as-is VM have been detached from the VM and moved to root folder String deployAsIsRootDiskPath = dsMo.searchFileInSubFolders(vol.getPath() + VmwareResource.VMDK_EXTENSION, @@ -3859,7 +3859,7 @@ public Pair getSyncedVolume(VirtualMachineMO vmMo, VmwareConte String volumePath = volumeTO.getPath(); if (!file.getFileBaseName().equalsIgnoreCase(volumePath)) { if (logger.isInfoEnabled()) { - logger.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumePath + " -> " + file.getFileBaseName()); + logger.info(String.format("Detected disk-chain top file change on volume: %s -> %s", volumeTO, file.getFileBaseName())); } volumePathChangeObserved = true; volumePath = file.getFileBaseName(); @@ -3871,7 +3871,7 @@ public Pair getSyncedVolume(VirtualMachineMO vmMo, VmwareConte if (diskDatastoreMoFromVM != null) { String actualPoolUuid = diskDatastoreMoFromVM.getCustomFieldValue(CustomFieldConstants.CLOUD_UUID); if (!actualPoolUuid.equalsIgnoreCase(primaryStore.getUuid())) { - logger.warn(String.format("Volume %s found to be in a different storage pool %s", volumePath, actualPoolUuid)); + logger.warn(String.format("Volume %s found to be in a different storage pool %s", volumeTO, actualPoolUuid)); datastoreChangeObserved = true; volumeTO.setDataStoreUuid(actualPoolUuid); volumeTO.setChainInfo(_gson.toJson(matchingExistingDisk)); diff --git a/plugins/hypervisors/xenserver/src/main/java/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java 
b/plugins/hypervisors/xenserver/src/main/java/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java index caf28e849a0e..76807b89d0f7 100644 --- a/plugins/hypervisors/xenserver/src/main/java/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java +++ b/plugins/hypervisors/xenserver/src/main/java/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java @@ -196,8 +196,7 @@ private String handleManagedVolumePreMigration(VolumeInfo volumeInfo, StoragePoo final Answer answer = agentMgr.easySend(destHost.getId(), cmd); if (answer == null || !answer.getResult()) { - String errMsg = "Error interacting with host (related to CreateStoragePoolCommand)" + - (StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : ""); + String errMsg = String.format("Error interacting with host %s (related to CreateStoragePoolCommand)%s", destHost, (answer != null && StringUtils.isNotBlank(answer.getDetails())) ? ": " + answer.getDetails() : ""); logger.error(errMsg); @@ -238,8 +237,7 @@ private void handleManagedVolumePostMigration(VolumeInfo volumeInfo, Host srcHos final Answer answer = agentMgr.easySend(srcHost.getId(), cmd); if (answer == null || !answer.getResult()) { - String errMsg = "Error interacting with host (related to DeleteStoragePoolCommand)" + - (StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : ""); + String errMsg = String.format("Error interacting with host %s (related to DeleteStoragePoolCommand)%s", srcHost, (answer != null && StringUtils.isNotBlank(answer.getDetails())) ? ": " + answer.getDetails() : ""); logger.error(errMsg); @@ -281,8 +279,8 @@ private void handleManagedVolumesAfterFailedMigration(Map final Answer answer = agentMgr.easySend(destHost.getId(), cmd); if (answer == null || !answer.getResult()) { - String errMsg = "Error interacting with host (related to handleManagedVolumesAfterFailedMigration)" + - (StringUtils.isNotBlank(answer.getDetails()) ? 
": " + answer.getDetails() : ""); + String errMsg = String.format("Error interacting with host %s (related to handleManagedVolumesAfterFailedMigration)%s", + destHost, answer != null && (StringUtils.isNotBlank(answer.getDetails())) ? ": " + answer.getDetails() : ""); logger.error(errMsg); @@ -346,7 +344,7 @@ private Answer migrateVmWithVolumesAcrossCluster(VMInstanceVO vm, VirtualMachine logger.error("Migration with storage of vm " + vm + " to host " + destHost + " failed."); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } else if (!receiveAnswer.getResult()) { - logger.error("Migration with storage of vm " + vm + " failed. Details: " + receiveAnswer.getDetails()); + logger.error(String.format("Migration with storage of vm %s to host %s failed. Details: %s", vm, destHost, receiveAnswer.getDetails())); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } @@ -362,7 +360,7 @@ private Answer migrateVmWithVolumesAcrossCluster(VMInstanceVO vm, VirtualMachine } else if (!sendAnswer.getResult()) { handleManagedVolumesAfterFailedMigration(volumeToPool, destHost); - logger.error("Migration with storage of vm " + vm + " failed. Details: " + sendAnswer.getDetails()); + logger.error(String.format("Migration with storage of vm %s failed to host %s. 
Details: %s", vm, destHost, sendAnswer.getDetails())); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } @@ -370,10 +368,10 @@ private Answer migrateVmWithVolumesAcrossCluster(VMInstanceVO vm, VirtualMachine MigrateWithStorageCompleteAnswer answer = (MigrateWithStorageCompleteAnswer)agentMgr.send(destHost.getId(), command); if (answer == null) { - logger.error("Migration with storage of vm " + vm + " failed."); + logger.error(String.format("Migration with storage of vm %s to host %s failed.", vm, destHost)); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } else if (!answer.getResult()) { - logger.error("Migration with storage of vm " + vm + " failed. Details: " + answer.getDetails()); + logger.error(String.format("Migration with storage of vm %s to host %s failed. Details: %s", vm, destHost, answer.getDetails())); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } else { // Update the volume details after migration. @@ -403,10 +401,10 @@ private Answer migrateVmWithVolumesWithinCluster(VMInstanceVO vm, VirtualMachine MigrateWithStorageCommand command = new MigrateWithStorageCommand(to, volumeToFilerto); MigrateWithStorageAnswer answer = (MigrateWithStorageAnswer)agentMgr.send(destHost.getId(), command); if (answer == null) { - logger.error("Migration with storage of vm " + vm + " failed."); + logger.error(String.format("Migration with storage of vm %s to host %s failed.", vm, destHost)); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } else if (!answer.getResult()) { - logger.error("Migration with storage of vm " + vm + " failed. Details: " + answer.getDetails()); + logger.error(String.format("Migration with storage of vm %s to host %s failed. 
Details: %s", vm, destHost, answer.getDetails())); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost + ". " + answer.getDetails()); } else { // Update the volume details after migration. diff --git a/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/CloudianConnectorImpl.java b/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/CloudianConnectorImpl.java index 3c1f161dd202..d5725e88a3cf 100644 --- a/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/CloudianConnectorImpl.java +++ b/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/CloudianConnectorImpl.java @@ -267,7 +267,7 @@ public void onPublishMessage(String senderAddress, String subject, Object args) try { final Account account = accountDao.findByIdIncludingRemoved((Long) args); if(!removeUserAccount(account)) { - logger.warn(String.format("Failed to remove account to Cloudian while removing CloudStack account=%s, id=%s", account.getAccountName(), account.getId())); + logger.warn("Failed to remove account to Cloudian while removing CloudStack account {}", account); } } catch (final Exception e) { logger.error("Caught exception while removing account in Cloudian: ", e); @@ -281,7 +281,7 @@ public void onPublishMessage(String senderAddress, String subject, Object args) try { final Domain domain = domainDao.findById((Long) args); if (!addGroup(domain)) { - logger.warn(String.format("Failed to add group in Cloudian while adding CloudStack domain=%s id=%s", domain.getPath(), domain.getId())); + logger.warn("Failed to add group in Cloudian while adding CloudStack domain {}", domain); } } catch (final Exception e) { logger.error("Caught exception adding domain/group in Cloudian: ", e); @@ -295,7 +295,7 @@ public void onPublishMessage(String senderAddress, String subject, Object args) try { final DomainVO domain = (DomainVO) args; if (!removeGroup(domain)) { - 
logger.warn(String.format("Failed to remove group in Cloudian while removing CloudStack domain=%s id=%s", domain.getPath(), domain.getId())); + logger.warn("Failed to remove group in Cloudian while removing CloudStack domain {}", domain); } } catch (final Exception e) { logger.error("Caught exception while removing domain/group in Cloudian: ", e); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index be64b15c7ba3..477eb257deef 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -316,16 +316,16 @@ private boolean isKubernetesServiceNetworkOfferingConfigured(DataCenter zone) { return false; } if (networkOffering.getState() == NetworkOffering.State.Disabled) { - logger.warn(String.format("Network offering ID: %s is not enabled", networkOffering.getUuid())); + logger.warn("Network offering: {} is not enabled", networkOffering); return false; } List services = networkOfferingServiceMapDao.listServicesForNetworkOffering(networkOffering.getId()); if (services == null || services.isEmpty() || !services.contains("SourceNat")) { - logger.warn(String.format("Network offering ID: %s does not have necessary services to provision Kubernetes cluster", networkOffering.getUuid())); + logger.warn("Network offering: {} does not have necessary services to provision Kubernetes cluster", networkOffering); return false; } if (!networkOffering.isEgressDefaultPolicy()) { - logger.warn(String.format("Network offering ID: %s has egress default policy turned off should be on to provision Kubernetes cluster", networkOffering.getUuid())); + logger.warn("Network offering: {} has egress default 
policy turned off should be on to provision Kubernetes cluster", networkOffering); return false; } boolean offeringAvailableForZone = false; @@ -337,7 +337,7 @@ private boolean isKubernetesServiceNetworkOfferingConfigured(DataCenter zone) { } } if (!offeringAvailableForZone) { - logger.warn(String.format("Network offering ID: %s is not available for zone ID: %s", networkOffering.getUuid(), zone.getUuid())); + logger.warn("Network offering: {} is not available for zone: {}", networkOffering, zone); return false; } long physicalNetworkId = networkModel.findPhysicalNetworkId(zone.getId(), networkOffering.getTags(), networkOffering.getTrafficType()); @@ -387,15 +387,15 @@ protected void validateIsolatedNetworkIpRules(long ipId, FirewallRule.Purpose pu Integer startPort = rule.getSourcePortStart(); Integer endPort = rule.getSourcePortEnd(); if (logger.isDebugEnabled()) { - logger.debug(String.format("Validating rule with purpose: %s for network: %s with ports: %d-%d", purpose.toString(), network.getUuid(), startPort, endPort)); + logger.debug("Validating rule with purpose: {} for network: {} with ports: {}-{}", purpose.toString(), network, startPort, endPort); } if (startPort <= KubernetesClusterActionWorker.CLUSTER_API_PORT && KubernetesClusterActionWorker.CLUSTER_API_PORT <= endPort) { - throw new InvalidParameterValueException(String.format("Network ID: %s has conflicting %s rules to provision Kubernetes cluster for API access", network.getUuid(), purpose.toString().toLowerCase())); + throw new InvalidParameterValueException(String.format("Network: %s has conflicting %s rules to provision Kubernetes cluster for API access", network, purpose.toString().toLowerCase())); } int expectedSshStart = KubernetesClusterActionWorker.CLUSTER_NODES_DEFAULT_START_SSH_PORT; int expectedSshEnd = expectedSshStart + clusterTotalNodeCount - 1; if (Math.max(expectedSshStart, startPort) <= Math.min(expectedSshEnd, endPort)) { - throw new 
InvalidParameterValueException(String.format("Network ID: %s has conflicting %s rules to provision Kubernetes cluster for node VM SSH access", network.getUuid(), purpose.toString().toLowerCase())); + throw new InvalidParameterValueException(String.format("Network: %s has conflicting %s rules to provision Kubernetes cluster for node VM SSH access", network, purpose.toString().toLowerCase())); } } } @@ -521,10 +521,10 @@ private DeployDestination plan(final long nodesCount, final DataCenter zone, fin Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); if (logger.isDebugEnabled()) { - logger.debug(String.format("Checking host ID: %s for capacity already reserved %d", hostVO.getUuid(), reserved)); + logger.debug("Checking host: {} for capacity already reserved {}", hostVO, reserved); } if (capacityManager.checkIfHostHasCapacity(hostVO, cpu_requested * reserved, ram_requested * reserved, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { - logger.debug("Found host ID == '{}' to have enough capacity, CPU={} RAM={}", hostVO.getUuid(), cpu_requested * reserved, toHumanReadableSize(ram_requested * reserved)); + logger.debug("Found host {} to have enough capacity, CPU={} RAM={}", hostVO, cpu_requested * reserved, toHumanReadableSize(ram_requested * reserved)); hostEntry.setValue(new Pair(hostVO, reserved)); suitable_host_found = true; planCluster = cluster; @@ -533,19 +533,19 @@ private DeployDestination plan(final long nodesCount, final DataCenter zone, fin } if (!suitable_host_found) { if (logger.isInfoEnabled()) { - logger.info(String.format("Suitable hosts not found in datacenter ID: %s for node %d with offering ID: %s", zone.getUuid(), i, offering.getUuid())); + logger.info("Suitable hosts not found in datacenter: {} for node {} with offering: {}", zone, i, offering); } break; } } if (suitable_host_found) { if (logger.isInfoEnabled()) { - 
logger.info(String.format("Suitable hosts found in datacenter ID: %s, creating deployment destination", zone.getUuid())); + logger.info("Suitable hosts found in datacenter: {}, creating deployment destination", zone); } return new DeployDestination(zone, null, planCluster, null); } - String msg = String.format("Cannot find enough capacity for Kubernetes cluster(requested cpu=%d memory=%s) with offering ID: %s", - cpu_requested * nodesCount, toHumanReadableSize(ram_requested * nodesCount), offering.getUuid()); + String msg = String.format("Cannot find enough capacity for Kubernetes cluster(requested cpu=%d memory=%s) with offering: %s", + cpu_requested * nodesCount, toHumanReadableSize(ram_requested * nodesCount), offering); logger.warn(msg); throw new InsufficientServerCapacityException(msg, DataCenter.class, zone.getId()); } @@ -872,7 +872,7 @@ private Network getKubernetesClusterNetworkIfMissing(final String clusterName, f PhysicalNetwork physicalNetwork = physicalNetworkDao.findById(physicalNetworkId); if (logger.isInfoEnabled()) { - logger.info(String.format("Creating network for account ID: %s from the network offering ID: %s as part of Kubernetes cluster: %s deployment process", owner.getUuid(), networkOffering.getUuid(), clusterName)); + logger.info("Creating network for account: {} from the network offering: {} as part of Kubernetes cluster: {} deployment process", owner, networkOffering, clusterName); } CallContext networkContext = CallContext.register(CallContext.current(), ApiCommandResourceType.Network); @@ -1146,7 +1146,7 @@ protected boolean stateTransitTo(long kubernetesClusterId, KubernetesCluster.Eve try { return _stateMachine.transitTo(kubernetesCluster, e, null, kubernetesClusterDao); } catch (NoTransitionException nte) { - logger.warn(String.format("Failed to transition state of the Kubernetes cluster : %s in state %s on event %s", kubernetesCluster.getName(), kubernetesCluster.getState().toString(), e.toString()), nte); + logger.warn("Failed to 
transition state of the Kubernetes cluster: {} in state {} on event {}", kubernetesCluster, kubernetesCluster.getState().toString(), e.toString(), nte); return false; } } @@ -1261,7 +1261,7 @@ public KubernetesClusterVO doInTransaction(TransactionStatus status) { addKubernetesClusterDetails(cluster, defaultNetwork, cmd); if (logger.isInfoEnabled()) { - logger.info(String.format("Kubernetes cluster name: %s and ID: %s has been created", cluster.getName(), cluster.getUuid())); + logger.info("Kubernetes cluster {} has been created", cluster); } CallContext.current().putContextParameter(KubernetesCluster.class, cluster.getUuid()); return cluster; @@ -1352,19 +1352,18 @@ public boolean startKubernetesCluster(KubernetesClusterVO kubernetesCluster, boo accountManager.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, false, kubernetesCluster); if (kubernetesCluster.getState().equals(KubernetesCluster.State.Running)) { if (logger.isDebugEnabled()) { - logger.debug(String.format("Kubernetes cluster : %s is in running state", kubernetesCluster.getName())); + logger.debug("Kubernetes cluster {} is in running state", kubernetesCluster); } return true; } if (kubernetesCluster.getState().equals(KubernetesCluster.State.Starting)) { - if (logger.isDebugEnabled()) { - logger.debug(String.format("Kubernetes cluster : %s is already in starting state", kubernetesCluster.getName())); - } + if (logger.isDebugEnabled()) + logger.debug("Kubernetes cluster {} is already in starting state", kubernetesCluster); return true; } final DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId()); if (zone == null) { - logAndThrow(Level.WARN, String.format("Unable to find zone for Kubernetes cluster : %s", kubernetesCluster.getName())); + logAndThrow(Level.WARN, String.format("Unable to find zone for Kubernetes cluster %s", kubernetesCluster)); } KubernetesClusterStartWorker startWorker = new KubernetesClusterStartWorker(kubernetesCluster, 
this); @@ -1425,13 +1424,13 @@ public boolean stopKubernetesCluster(StopKubernetesClusterCmd cmd) throws CloudR accountManager.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, false, kubernetesCluster); if (kubernetesCluster.getState().equals(KubernetesCluster.State.Stopped)) { if (logger.isDebugEnabled()) { - logger.debug(String.format("Kubernetes cluster : %s is already stopped", kubernetesCluster.getName())); + logger.debug("Kubernetes cluster: {} is already stopped", kubernetesCluster); } return true; } if (kubernetesCluster.getState().equals(KubernetesCluster.State.Stopping)) { if (logger.isDebugEnabled()) { - logger.debug(String.format("Kubernetes cluster : %s is getting stopped", kubernetesCluster.getName())); + logger.debug("Kubernetes cluster: {} is getting stopped", kubernetesCluster); } return true; } @@ -1787,20 +1786,20 @@ public void reallyRun() { List kubernetesClusters = kubernetesClusterDao.findKubernetesClustersToGarbageCollect(); for (KubernetesCluster kubernetesCluster : kubernetesClusters) { if (logger.isInfoEnabled()) { - logger.info(String.format("Running Kubernetes cluster garbage collector on Kubernetes cluster : %s", kubernetesCluster.getName())); + logger.info("Running Kubernetes cluster garbage collector on Kubernetes cluster: {}", kubernetesCluster); } try { KubernetesClusterDestroyWorker destroyWorker = new KubernetesClusterDestroyWorker(kubernetesCluster, KubernetesClusterManagerImpl.this); destroyWorker = ComponentContext.inject(destroyWorker); if (destroyWorker.destroy()) { if (logger.isInfoEnabled()) { - logger.info(String.format("Garbage collection complete for Kubernetes cluster : %s", kubernetesCluster.getName())); + logger.info("Garbage collection complete for Kubernetes cluster: {}", kubernetesCluster); } } else { - logger.warn(String.format("Garbage collection failed for Kubernetes cluster : %s, it will be attempted to garbage collected in next run", 
kubernetesCluster.getName())); + logger.warn("Garbage collection failed for Kubernetes cluster : {}, it will be attempted to garbage collected in next run", kubernetesCluster); } } catch (CloudRuntimeException e) { - logger.warn(String.format("Failed to destroy Kubernetes cluster : %s during GC", kubernetesCluster.getName()), e); + logger.warn("Failed to destroy Kubernetes cluster : {} during GC", kubernetesCluster, e); // proceed further with rest of the Kubernetes cluster garbage collection } } @@ -1844,14 +1843,14 @@ public void reallyRun() { List runningKubernetesClusters = kubernetesClusterDao.findManagedKubernetesClustersInState(KubernetesCluster.State.Running); for (KubernetesCluster kubernetesCluster : runningKubernetesClusters) { if (logger.isInfoEnabled()) { - logger.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster : %s", kubernetesCluster.getName())); + logger.info("Running Kubernetes cluster state scanner on Kubernetes cluster: {}", kubernetesCluster); } try { if (!isClusterVMsInDesiredState(kubernetesCluster, VirtualMachine.State.Running)) { stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.FaultsDetected); } } catch (Exception e) { - logger.warn(String.format("Failed to run Kubernetes cluster Running state scanner on Kubernetes cluster : %s status scanner", kubernetesCluster.getName()), e); + logger.warn("Failed to run Kubernetes cluster Running state scanner on Kubernetes cluster: {} status scanner", kubernetesCluster, e); } } @@ -1859,14 +1858,14 @@ public void reallyRun() { List stoppedKubernetesClusters = kubernetesClusterDao.findManagedKubernetesClustersInState(KubernetesCluster.State.Stopped); for (KubernetesCluster kubernetesCluster : stoppedKubernetesClusters) { if (logger.isInfoEnabled()) { - logger.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster : %s for state: %s", kubernetesCluster.getName(), KubernetesCluster.State.Stopped.toString())); + 
logger.info("Running Kubernetes cluster state scanner on Kubernetes cluster: {} for state: {}", kubernetesCluster, KubernetesCluster.State.Stopped.toString()); } try { if (!isClusterVMsInDesiredState(kubernetesCluster, VirtualMachine.State.Stopped)) { stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.FaultsDetected); } } catch (Exception e) { - logger.warn(String.format("Failed to run Kubernetes cluster Stopped state scanner on Kubernetes cluster : %s status scanner", kubernetesCluster.getName()), e); + logger.warn("Failed to run Kubernetes cluster Stopped state scanner on Kubernetes cluster: {} status scanner", kubernetesCluster, e); } } @@ -1874,7 +1873,7 @@ public void reallyRun() { List alertKubernetesClusters = kubernetesClusterDao.findManagedKubernetesClustersInState(KubernetesCluster.State.Alert); for (KubernetesClusterVO kubernetesCluster : alertKubernetesClusters) { if (logger.isInfoEnabled()) { - logger.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster : %s for state: %s", kubernetesCluster.getName(), KubernetesCluster.State.Alert.toString())); + logger.info("Running Kubernetes cluster state scanner on Kubernetes cluster: {} for state: {}", kubernetesCluster, KubernetesCluster.State.Alert.toString()); } try { if (isClusterVMsInDesiredState(kubernetesCluster, VirtualMachine.State.Running)) { @@ -1887,7 +1886,7 @@ public void reallyRun() { stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); } } catch (Exception e) { - logger.warn(String.format("Failed to run Kubernetes cluster Alert state scanner on Kubernetes cluster : %s status scanner", kubernetesCluster.getName()), e); + logger.warn("Failed to run Kubernetes cluster Alert state scanner on Kubernetes cluster: {} status scanner", kubernetesCluster, e); } } @@ -1900,7 +1899,7 @@ public void reallyRun() { continue; } if (logger.isInfoEnabled()) { - logger.info(String.format("Running Kubernetes cluster state scanner on 
Kubernetes cluster : %s for state: %s", kubernetesCluster.getName(), KubernetesCluster.State.Starting.toString())); + logger.info("Running Kubernetes cluster state scanner on Kubernetes cluster: {} for state: {}", kubernetesCluster, KubernetesCluster.State.Starting.toString()); } try { if (isClusterVMsInDesiredState(kubernetesCluster, VirtualMachine.State.Running)) { @@ -1909,20 +1908,20 @@ public void reallyRun() { stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } } catch (Exception e) { - logger.warn(String.format("Failed to run Kubernetes cluster Starting state scanner on Kubernetes cluster : %s status scanner", kubernetesCluster.getName()), e); + logger.warn("Failed to run Kubernetes cluster Starting state scanner on Kubernetes cluster: {} status scanner", kubernetesCluster, e); } } List destroyingKubernetesClusters = kubernetesClusterDao.findManagedKubernetesClustersInState(KubernetesCluster.State.Destroying); for (KubernetesCluster kubernetesCluster : destroyingKubernetesClusters) { if (logger.isInfoEnabled()) { - logger.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster : %s for state: %s", kubernetesCluster.getName(), KubernetesCluster.State.Destroying.toString())); + logger.info("Running Kubernetes cluster state scanner on Kubernetes cluster: {} for state: {}", kubernetesCluster, KubernetesCluster.State.Destroying.toString()); } try { KubernetesClusterDestroyWorker destroyWorker = new KubernetesClusterDestroyWorker(kubernetesCluster, KubernetesClusterManagerImpl.this); destroyWorker = ComponentContext.inject(destroyWorker); destroyWorker.destroy(); } catch (Exception e) { - logger.warn(String.format("Failed to run Kubernetes cluster Destroying state scanner on Kubernetes cluster : %s status scanner", kubernetesCluster.getName()), e); + logger.warn("Failed to run Kubernetes cluster Destroying state scanner on Kubernetes cluster : {} status scanner", kubernetesCluster, e); } } } @@ -1940,8 
+1939,8 @@ boolean isClusterVMsInDesiredState(KubernetesCluster kubernetesCluster, VirtualM // check cluster is running at desired capacity include control nodes as well if (clusterVMs.size() < kubernetesCluster.getTotalNodeCount()) { if (logger.isDebugEnabled()) { - logger.debug(String.format("Found only %d VMs in the Kubernetes cluster %s while expected %d VMs to be in state: %s", - clusterVMs.size(), kubernetesCluster.getName(), kubernetesCluster.getTotalNodeCount(), state.toString())); + logger.debug("Found only {} VMs in the Kubernetes cluster {} while expected {} VMs to be in state: {}", + clusterVMs.size(), kubernetesCluster, kubernetesCluster.getTotalNodeCount(), state.toString()); } return false; } @@ -1950,8 +1949,9 @@ boolean isClusterVMsInDesiredState(KubernetesCluster kubernetesCluster, VirtualM VMInstanceVO vm = vmInstanceDao.findByIdIncludingRemoved(clusterVm.getVmId()); if (vm.getState() != state) { if (logger.isDebugEnabled()) { - logger.debug(String.format("Found VM : %s in the Kubernetes cluster : %s in state: %s while expected to be in state: %s. So moving the cluster to Alert state for reconciliation", - vm.getUuid(), kubernetesCluster.getName(), vm.getState().toString(), state.toString())); + logger.debug("Found VM: {} in the Kubernetes cluster {} in state: {} while " + + "expected to be in state: {}. 
So moving the cluster to Alert state for reconciliation", + vm, kubernetesCluster, vm.getState().toString(), state.toString()); } return false; } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java index 270916aab7e4..01268f421110 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java @@ -29,6 +29,7 @@ import javax.persistence.GenerationType; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "kubernetes_cluster") @@ -402,6 +403,13 @@ public KubernetesClusterVO(String name, String description, long zoneId, long ku this.maxSize = maxSize; } + @Override + public String toString() { + return String.format("KubernetesCluster %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } + @Override public Class getEntityType() { return KubernetesCluster.class; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java index 50d7fb14085a..fc80c3001810 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java @@ -76,8 +76,8 @@ private void validateClusterSate() { || kubernetesCluster.getState().equals(KubernetesCluster.State.Alert) || 
kubernetesCluster.getState().equals(KubernetesCluster.State.Error) || kubernetesCluster.getState().equals(KubernetesCluster.State.Destroying))) { - String msg = String.format("Cannot perform delete operation on cluster : %s in state: %s", - kubernetesCluster.getName(), kubernetesCluster.getState()); + String msg = String.format("Cannot perform delete operation on cluster %s in state: %s", + kubernetesCluster, kubernetesCluster.getState()); logger.warn(msg); throw new PermissionDeniedException(msg); } @@ -100,15 +100,16 @@ private boolean destroyClusterVMs() { try { UserVm vm = userVmService.destroyVm(vmID, true); if (!userVmManager.expunge(userVM)) { - logger.warn(String.format("Unable to expunge VM %s : %s, destroying Kubernetes cluster will probably fail", - vm.getInstanceName() , vm.getUuid())); + logger.warn("Unable to expunge VM {}, destroying Kubernetes cluster will probably fail", vm); } kubernetesClusterVmMapDao.expunge(clusterVM.getId()); if (logger.isInfoEnabled()) { - logger.info(String.format("Destroyed VM : %s as part of Kubernetes cluster : %s cleanup", vm.getDisplayName(), kubernetesCluster.getName())); + logger.info("Destroyed VM {} as part of Kubernetes cluster : {} cleanup", vm, kubernetesCluster); } } catch (ResourceUnavailableException | ConcurrentOperationException e) { - logger.warn(String.format("Failed to destroy VM : %s part of the Kubernetes cluster : %s cleanup. Moving on with destroying remaining resources provisioned for the Kubernetes cluster", userVM.getDisplayName(), kubernetesCluster.getName()), e); + logger.warn("Failed to destroy VM {} part of the Kubernetes cluster {} " + + "cleanup. 
Moving on with destroying remaining resources provisioned " + + "for the Kubernetes cluster", userVM, kubernetesCluster, e); return false; } finally { CallContext.unregister(); @@ -132,13 +133,12 @@ private void destroyKubernetesClusterNetwork() throws ManagementServerException ReservationContext context = new ReservationContextImpl(null, null, callerUser, owner); boolean networkDestroyed = networkMgr.destroyNetwork(kubernetesCluster.getNetworkId(), context, true); if (!networkDestroyed) { - String msg = String.format("Failed to destroy network : %s as part of Kubernetes cluster : %s cleanup", network.getName(), kubernetesCluster.getName()); + String msg = String.format("Failed to destroy network: %s as part of Kubernetes cluster: %s cleanup", network, kubernetesCluster); logger.warn(msg); throw new ManagementServerException(msg); } if (logger.isInfoEnabled()) { - logger.info(String.format("Destroyed network : %s as part of Kubernetes cluster : %s cleanup", - network.getName(), kubernetesCluster.getName())); + logger.info("Destroyed network: {} as part of Kubernetes cluster: {} cleanup", network, kubernetesCluster); } } } @@ -270,11 +270,11 @@ public boolean destroy() throws CloudRuntimeException { } } } else { - logger.error(String.format("Failed to find network for Kubernetes cluster : %s", kubernetesCluster.getName())); + logger.error("Failed to find network for Kubernetes cluster : {}", kubernetesCluster); } } if (logger.isInfoEnabled()) { - logger.info(String.format("Destroying Kubernetes cluster : %s", kubernetesCluster.getName())); + logger.info("Destroying Kubernetes cluster : {}", kubernetesCluster); } stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.DestroyRequested); boolean vmsDestroyed = destroyClusterVMs(); @@ -285,7 +285,7 @@ public boolean destroy() throws CloudRuntimeException { try { destroyKubernetesClusterNetwork(); } catch (ManagementServerException e) { - String msg = String.format("Failed to destroy network of Kubernetes 
cluster : %s cleanup", kubernetesCluster.getName()); + String msg = String.format("Failed to destroy network of Kubernetes cluster: %s cleanup", kubernetesCluster); logger.warn(msg, e); updateKubernetesClusterEntryForGC(); throw new CloudRuntimeException(msg, e); @@ -294,7 +294,7 @@ public boolean destroy() throws CloudRuntimeException { try { checkForRulesToDelete(); } catch (ManagementServerException e) { - String msg = String.format("Failed to remove network rules of Kubernetes cluster : %s", kubernetesCluster.getName()); + String msg = String.format("Failed to remove network rules of Kubernetes cluster: %s", kubernetesCluster); logger.warn(msg, e); updateKubernetesClusterEntryForGC(); throw new CloudRuntimeException(msg, e); @@ -302,14 +302,14 @@ public boolean destroy() throws CloudRuntimeException { try { releaseVpcTierPublicIpIfNeeded(); } catch (InsufficientAddressCapacityException e) { - String msg = String.format("Failed to release public IP for VPC tier used by Kubernetes cluster : %s", kubernetesCluster.getName()); + String msg = String.format("Failed to release public IP for VPC tier used by Kubernetes cluster: %s", kubernetesCluster); logger.warn(msg, e); updateKubernetesClusterEntryForGC(); throw new CloudRuntimeException(msg, e); } } } else { - String msg = String.format("Failed to destroy one or more VMs as part of Kubernetes cluster : %s cleanup", kubernetesCluster.getName()); + String msg = String.format("Failed to destroy one or more VMs as part of Kubernetes cluster: %s cleanup", kubernetesCluster); logger.warn(msg); updateKubernetesClusterEntryForGC(); throw new CloudRuntimeException(msg); @@ -319,12 +319,12 @@ public boolean destroy() throws CloudRuntimeException { kubernetesClusterDetailsDao.removeDetails(kubernetesCluster.getId()); boolean deleted = kubernetesClusterDao.remove(kubernetesCluster.getId()); if (!deleted) { - logMessage(Level.WARN, String.format("Failed to delete Kubernetes cluster : %s", kubernetesCluster.getName()), null); + 
logMessage(Level.WARN, String.format("Failed to delete Kubernetes cluster: %s", kubernetesCluster), null); updateKubernetesClusterEntryForGC(); return false; } if (logger.isInfoEnabled()) { - logger.info(String.format("Kubernetes cluster : %s is successfully deleted", kubernetesCluster.getName())); + logger.info("Kubernetes cluster: {} is successfully deleted", kubernetesCluster); } return true; } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java index aa500de91902..de85e6231f2d 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java @@ -164,9 +164,9 @@ private void scaleKubernetesClusterVpcTierRules(final List clusterVMIds) t */ private void scaleKubernetesClusterNetworkRules(final List clusterVMIds) throws ManagementServerException { if (manager.isDirectAccess(network)) { - if (logger.isDebugEnabled()) { - logger.debug(String.format("Network : %s for Kubernetes cluster : %s is not an isolated network or ROUTED network, therefore, no need for network rules", network.getName(), kubernetesCluster.getName())); - } + if (logger.isDebugEnabled()) + logger.debug("Network: {} for Kubernetes cluster: {} is not an isolated network " + + "or ROUTED network, therefore, no need for network rules", network, kubernetesCluster); return; } if (network.getVpcId() != null) { @@ -206,7 +206,7 @@ private boolean removeKubernetesClusterNode(final String ipAddress, final int po pkFile, null, String.format("sudo /opt/bin/kubectl drain %s --ignore-daemonsets --delete-local-data", hostName), 10000, 10000, 60000); if (!result.first()) { - 
logger.warn(String.format("Draining node: %s on VM : %s in Kubernetes cluster : %s unsuccessful", hostName, userVm.getDisplayName(), kubernetesCluster.getName())); + logger.warn("Draining node: {} on VM: {} in Kubernetes cluster: {} unsuccessful", hostName, userVm, kubernetesCluster); } else { result = SshHelper.sshExecute(ipAddress, port, getControlNodeLoginUser(), pkFile, null, String.format("sudo /opt/bin/kubectl delete node %s", hostName), @@ -214,18 +214,18 @@ private boolean removeKubernetesClusterNode(final String ipAddress, final int po if (result.first()) { return true; } else { - logger.warn(String.format("Deleting node: %s on VM : %s in Kubernetes cluster : %s unsuccessful", hostName, userVm.getDisplayName(), kubernetesCluster.getName())); + logger.warn("Deleting node: {} on VM: {} in Kubernetes cluster: {} unsuccessful", hostName, userVm, kubernetesCluster); } } break; } catch (Exception e) { - String msg = String.format("Failed to remove Kubernetes cluster : %s node: %s on VM : %s", kubernetesCluster.getName(), hostName, userVm.getDisplayName()); + String msg = String.format("Failed to remove Kubernetes cluster: %s node: %s on VM: %s", kubernetesCluster, hostName, userVm); logger.warn(msg, e); } try { Thread.sleep(waitDuration); } catch (InterruptedException ie) { - logger.error(String.format("Error while waiting for Kubernetes cluster : %s node: %s on VM : %s removal", kubernetesCluster.getName(), hostName, userVm.getDisplayName()), ie); + logger.error("Error while waiting for Kubernetes cluster: {} node: {} on VM: {} removal", kubernetesCluster, hostName, userVm, ie); } retryCounter++; } @@ -316,9 +316,11 @@ private void scaleKubernetesClusterOffering() throws CloudRuntimeException { private void removeNodesFromCluster(List vmMaps) throws CloudRuntimeException { for (KubernetesClusterVmMapVO vmMapVO : vmMaps) { UserVmVO userVM = userVmDao.findById(vmMapVO.getVmId()); - logger.info(String.format("Removing vm : %s from cluster %s", 
userVM.getDisplayName(), kubernetesCluster.getName())); + logger.info("Removing vm {} from cluster {}", userVM, kubernetesCluster); if (!removeKubernetesClusterNode(publicIpAddress, sshPort, userVM, 3, 30000)) { - logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, failed to remove Kubernetes node: %s running on VM : %s", kubernetesCluster.getName(), userVM.getHostName(), userVM.getDisplayName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes" + + " cluster %s, failed to remove Kubernetes node: %s running on VM : %s", + kubernetesCluster, userVM.getHostName(), userVM), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } CallContext vmContext = CallContext.register(CallContext.current(), ApiCommandResourceType.VirtualMachine); @@ -327,17 +329,18 @@ private void removeNodesFromCluster(List vmMaps) throw UserVm vm = userVmService.destroyVm(userVM.getId(), true); if (!userVmManager.expunge(userVM)) { logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster %s failed, unable to expunge VM '%s'." 
- , kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + , kubernetesCluster, vm), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } } catch (ResourceUnavailableException e) { logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster %s failed, unable to remove VM ID: %s", - kubernetesCluster.getName() , userVM.getDisplayName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e); + kubernetesCluster, userVM), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e); } finally { CallContext.unregister(); } kubernetesClusterVmMapDao.expunge(vmMapVO.getId()); if (System.currentTimeMillis() > scaleTimeoutTime) { - logTransitStateAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster %s failed, scaling action timed out", kubernetesCluster.getName()),kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + logTransitStateAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster %s failed, scaling action timed out", + kubernetesCluster), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } } @@ -346,7 +349,9 @@ private void removeNodesFromCluster(List vmMaps) throw List clusterVMIds = getKubernetesClusterVMMaps().stream().map(KubernetesClusterVmMapVO::getVmId).collect(Collectors.toList()); scaleKubernetesClusterNetworkRules(clusterVMIds); } catch (ManagementServerException e) { - logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, unable to update network rules", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e); + logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes " + + "cluster %s, unable to update network rules", kubernetesCluster), + kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e); } } @@ -437,13 +442,13 @@ private boolean 
isAutoscalingChanged() { public boolean scaleCluster() throws CloudRuntimeException { init(); if (logger.isInfoEnabled()) { - logger.info(String.format("Scaling Kubernetes cluster : %s", kubernetesCluster.getName())); + logger.info("Scaling Kubernetes cluster {}", kubernetesCluster); } scaleTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterScaleTimeout.value() * 1000; final long originalClusterSize = kubernetesCluster.getNodeCount(); final ServiceOffering existingServiceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); if (existingServiceOffering == null) { - logAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster : %s failed, service offering for the Kubernetes cluster not found!", kubernetesCluster.getName())); + logAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster %s failed, service offering for the Kubernetes cluster not found!", kubernetesCluster)); } final boolean autoscalingChanged = isAutoscalingChanged(); final boolean serviceOfferingScalingNeeded = serviceOffering != null && serviceOffering.getId() != existingServiceOffering.getId(); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java index 4fefa54a6d98..ab3121f207b7 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java @@ -85,8 +85,7 @@ private void upgradeKubernetesClusterNodes() { } Pair result; if (logger.isInfoEnabled()) { - logger.info(String.format("Upgrading node on VM %s in Kubernetes cluster %s with Kubernetes version(%s) ID: %s", - vm.getDisplayName(), 
kubernetesCluster.getName(), upgradeVersion.getSemanticVersion(), upgradeVersion.getUuid())); + logger.info("Upgrading node on VM {} in Kubernetes cluster {} with Kubernetes version {}", vm, kubernetesCluster, upgradeVersion); } String errorMessage = String.format("Failed to upgrade Kubernetes cluster : %s, unable to drain Kubernetes node on VM : %s", kubernetesCluster.getName(), vm.getDisplayName()); for (int retry = KubernetesClusterService.KubernetesClusterUpgradeRetries.value(); retry >= 0; retry--) { @@ -149,8 +148,7 @@ private void upgradeKubernetesClusterNodes() { logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to get Kubernetes node on VM : %s upgraded to version %s", kubernetesCluster.getName(), vm.getDisplayName(), upgradeVersion.getSemanticVersion()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null); } if (logger.isInfoEnabled()) { - logger.info(String.format("Successfully upgraded node on VM %s in Kubernetes cluster %s with Kubernetes version(%s) ID: %s", - vm.getDisplayName(), kubernetesCluster.getName(), upgradeVersion.getSemanticVersion(), upgradeVersion.getUuid())); + logger.info("Successfully upgraded node on VM {} in Kubernetes cluster {} with Kubernetes version {}", vm, kubernetesCluster, upgradeVersion); } } } @@ -158,18 +156,18 @@ private void upgradeKubernetesClusterNodes() { public boolean upgradeCluster() throws CloudRuntimeException { init(); if (logger.isInfoEnabled()) { - logger.info(String.format("Upgrading Kubernetes cluster : %s", kubernetesCluster.getName())); + logger.info("Upgrading Kubernetes cluster: {}", kubernetesCluster); } upgradeTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterUpgradeTimeout.value() * 1000; Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); publicIpAddress = publicIpSshPort.first(); sshPort = publicIpSshPort.second(); if (StringUtils.isEmpty(publicIpAddress)) { - 
logAndThrow(Level.ERROR, String.format("Upgrade failed for Kubernetes cluster : %s, unable to retrieve associated public IP", kubernetesCluster.getName())); + logAndThrow(Level.ERROR, String.format("Upgrade failed for Kubernetes cluster: %s, unable to retrieve associated public IP", kubernetesCluster)); } clusterVMs = getKubernetesClusterVMs(); if (CollectionUtils.isEmpty(clusterVMs)) { - logAndThrow(Level.ERROR, String.format("Upgrade failed for Kubernetes cluster : %s, unable to retrieve VMs for cluster", kubernetesCluster.getName())); + logAndThrow(Level.ERROR, String.format("Upgrade failed for Kubernetes cluster: %s, unable to retrieve VMs for cluster", kubernetesCluster)); } retrieveScriptFiles(); stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.UpgradeRequested); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java index 7a3268014fdb..74e8b0c9b23e 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java @@ -59,7 +59,7 @@ public static boolean isKubernetesClusterNodeReady(final KubernetesCluster kuber return true; } if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Failed to retrieve status for node: %s in Kubernetes cluster : %s. Output: %s", nodeName, kubernetesCluster.getName(), result.second())); + LOGGER.debug(String.format("Failed to retrieve status for node: %s in Kubernetes cluster: %s. 
Output: %s", nodeName, kubernetesCluster, result.second())); } return false; } @@ -72,7 +72,7 @@ public static boolean isKubernetesClusterNodeReady(final KubernetesCluster kuber try { ready = isKubernetesClusterNodeReady(kubernetesCluster, ipAddress, port, user, sshKeyFile, nodeName); } catch (Exception e) { - LOGGER.warn(String.format("Failed to retrieve state of node: %s in Kubernetes cluster : %s", nodeName, kubernetesCluster.getName()), e); + LOGGER.warn(String.format("Failed to retrieve state of node: %s in Kubernetes cluster: %s", nodeName, kubernetesCluster), e); } if (ready) { return true; @@ -80,7 +80,7 @@ public static boolean isKubernetesClusterNodeReady(final KubernetesCluster kuber try { Thread.sleep(waitDuration); } catch (InterruptedException ie) { - LOGGER.error(String.format("Error while waiting for Kubernetes cluster : %s node: %s to become ready", kubernetesCluster.getName(), nodeName), ie); + LOGGER.error(String.format("Error while waiting for Kubernetes cluster: %s node: %s to become ready", kubernetesCluster, nodeName), ie); } } return false; @@ -120,14 +120,14 @@ public static boolean uncordonKubernetesClusterNode(final KubernetesCluster kube return true; } } catch (Exception e) { - LOGGER.warn(String.format("Failed to uncordon node: %s on VM ID : %s in Kubernetes cluster : %s", - hostName, userVm.getUuid(), kubernetesCluster.getName()), e); + LOGGER.warn(String.format("Failed to uncordon node: %s on VM %s in Kubernetes cluster %s", + hostName, userVm, kubernetesCluster), e); } try { Thread.sleep(waitDuration); } catch (InterruptedException ie) { - LOGGER.warn(String.format("Error while waiting for uncordon Kubernetes cluster : %s node: %s on VM : %s", - kubernetesCluster.getName(), hostName, userVm.getUuid()), ie); + LOGGER.warn(String.format("Error while waiting for uncordon Kubernetes cluster %s node: %s on VM %s", + kubernetesCluster, hostName, userVm), ie); } } return false; @@ -150,14 +150,14 @@ public static boolean 
isKubernetesClusterAddOnServiceRunning(final KubernetesClu lines) { if (line.contains(serviceName) && line.contains("Running")) { if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Service : %s in namespace: %s for the Kubernetes cluster : %s is running", serviceName, namespace, kubernetesCluster.getName())); + LOGGER.debug(String.format("Service %s in namespace %s for the Kubernetes cluster %s is running", serviceName, namespace, kubernetesCluster)); } return true; } } } } catch (Exception e) { - LOGGER.warn(String.format("Unable to retrieve service: %s running status in namespace %s for Kubernetes cluster : %s", serviceName, namespace, kubernetesCluster.getName()), e); + LOGGER.warn(String.format("Unable to retrieve service: %s running status in namespace %s for Kubernetes cluster %s", serviceName, namespace, kubernetesCluster), e); } return false; } @@ -169,11 +169,11 @@ public static boolean isKubernetesClusterDashboardServiceRunning(final Kubernete // Check if dashboard service is up running. 
while (System.currentTimeMillis() < timeoutTime) { if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Checking dashboard service for the Kubernetes cluster : %s to come up", kubernetesCluster.getName())); + LOGGER.debug(String.format("Checking dashboard service for the Kubernetes cluster: %s to come up", kubernetesCluster)); } if (isKubernetesClusterAddOnServiceRunning(kubernetesCluster, ipAddress, port, user, sshKeyFile, "kubernetes-dashboard", "kubernetes-dashboard")) { if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Dashboard service for the Kubernetes cluster : %s is in running state", kubernetesCluster.getName())); + LOGGER.info(String.format("Dashboard service for the Kubernetes cluster %s is in running state", kubernetesCluster)); } running = true; break; @@ -181,7 +181,7 @@ public static boolean isKubernetesClusterDashboardServiceRunning(final Kubernete try { Thread.sleep(waitDuration); } catch (InterruptedException ex) { - LOGGER.error(String.format("Error while waiting for Kubernetes cluster: %s API dashboard service to be available", kubernetesCluster.getName()), ex); + LOGGER.error(String.format("Error while waiting for Kubernetes cluster %s API dashboard service to be available", kubernetesCluster), ex); } } return running; @@ -201,11 +201,11 @@ public static String getKubernetesClusterConfig(final KubernetesCluster kubernet break; } else { if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Failed to retrieve kube-config file for Kubernetes cluster : %s. Output: %s", kubernetesCluster.getName(), result.second())); + LOGGER.info(String.format("Failed to retrieve kube-config file for Kubernetes cluster: %s. 
Output: %s", kubernetesCluster, result.second())); } } } catch (Exception e) { - LOGGER.warn(String.format("Failed to retrieve kube-config file for Kubernetes cluster : %s", kubernetesCluster.getName()), e); + LOGGER.warn(String.format("Failed to retrieve kube-config file for Kubernetes cluster %s", kubernetesCluster), e); } } return kubeConfig; @@ -221,7 +221,7 @@ public static int getKubernetesClusterReadyNodesCount(final KubernetesCluster ku return Integer.parseInt(result.second().trim().replace("\"", "")); } else { if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Failed to retrieve ready nodes for Kubernetes cluster : %s. Output: %s", kubernetesCluster.getName(), result.second())); + LOGGER.debug(String.format("Failed to retrieve ready nodes for Kubernetes cluster %s. Output: %s", kubernetesCluster, result.second())); } } return 0; @@ -241,18 +241,18 @@ public static boolean isKubernetesClusterServerRunning(final KubernetesCluster k String versionOutput = br.lines().collect(Collectors.joining()); if (StringUtils.isNotEmpty(versionOutput)) { if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Kubernetes cluster : %s API has been successfully provisioned, %s", kubernetesCluster.getName(), versionOutput)); + LOGGER.info(String.format("Kubernetes cluster %s API has been successfully provisioned, %s", kubernetesCluster, versionOutput)); } k8sApiServerSetup = true; break; } } catch (Exception e) { - LOGGER.warn(String.format("API endpoint for Kubernetes cluster : %s not available", kubernetesCluster.getName()), e); + LOGGER.warn(String.format("API endpoint for Kubernetes cluster %s not available", kubernetesCluster), e); } try { Thread.sleep(waitDuration); } catch (InterruptedException ie) { - LOGGER.error(String.format("Error while waiting for Kubernetes cluster : %s API endpoint to be available", kubernetesCluster.getName()), ie); + LOGGER.error(String.format("Error while waiting for Kubernetes cluster %s API endpoint to be available", 
kubernetesCluster), ie); } } return k8sApiServerSetup; @@ -266,11 +266,11 @@ public static boolean isKubernetesClusterControlVmRunning(final KubernetesCluste socket.connect(new InetSocketAddress(ipAddress, port), 10000); controlVmRunning = true; } catch (IOException e) { - LOGGER.info("Waiting for Kubernetes cluster : {} control node VMs to be accessible", kubernetesCluster.getName()); + LOGGER.info("Waiting for Kubernetes cluster {} control node VMs to be accessible", kubernetesCluster); try { Thread.sleep(10000); } catch (InterruptedException ex) { - LOGGER.warn("Error while waiting for Kubernetes cluster : {} control node VMs to be accessible", kubernetesCluster.getName(), ex); + LOGGER.warn("Error while waiting for Kubernetes cluster {} control node VMs to be accessible", kubernetesCluster, ex); } } } @@ -283,28 +283,28 @@ public static boolean validateKubernetesClusterReadyNodesCount(final KubernetesC final long timeoutTime, final long waitDuration) { while (System.currentTimeMillis() < timeoutTime) { if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Checking ready nodes for the Kubernetes cluster : %s with total %d provisioned nodes", kubernetesCluster.getName(), kubernetesCluster.getTotalNodeCount())); + LOGGER.debug(String.format("Checking ready nodes for the Kubernetes cluster %s with total %d provisioned nodes", kubernetesCluster, kubernetesCluster.getTotalNodeCount())); } try { int nodesCount = KubernetesClusterUtil.getKubernetesClusterReadyNodesCount(kubernetesCluster, ipAddress, port, user, sshKeyFile); if (nodesCount == kubernetesCluster.getTotalNodeCount()) { if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Kubernetes cluster : %s has %d ready nodes now", kubernetesCluster.getName(), kubernetesCluster.getTotalNodeCount())); + LOGGER.info(String.format("Kubernetes cluster %s has %d ready nodes now", kubernetesCluster, kubernetesCluster.getTotalNodeCount())); } return true; } else { if (LOGGER.isDebugEnabled()) { - 
LOGGER.debug(String.format("Kubernetes cluster : %s has total %d provisioned nodes while %d ready now", kubernetesCluster.getName(), kubernetesCluster.getTotalNodeCount(), nodesCount)); + LOGGER.debug(String.format("Kubernetes cluster %s has total %d provisioned nodes while %d ready now", kubernetesCluster, kubernetesCluster.getTotalNodeCount(), nodesCount)); } } } catch (Exception e) { - LOGGER.warn(String.format("Failed to retrieve ready node count for Kubernetes cluster : %s", kubernetesCluster.getName()), e); + LOGGER.warn(String.format("Failed to retrieve ready node count for Kubernetes cluster %s", kubernetesCluster), e); } try { Thread.sleep(waitDuration); } catch (InterruptedException ex) { - LOGGER.warn(String.format("Error while waiting during Kubernetes cluster : %s ready node check", kubernetesCluster.getName()), ex); + LOGGER.warn(String.format("Error while waiting during Kubernetes cluster %s ready node check", kubernetesCluster), ex); } } return false; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesSupportedVersionVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesSupportedVersionVO.java index 455561b4020b..ea8fe3ca7fd1 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesSupportedVersionVO.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesSupportedVersionVO.java @@ -30,6 +30,7 @@ import javax.persistence.Table; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "kubernetes_supported_version") @@ -85,6 +86,13 @@ public KubernetesSupportedVersionVO(String name, String semanticVersion, long is this.minimumRamSize = minimumRamSize; } + @Override + public String toString() { + return String.format("KubernetesSupportedVersion %s", + 
ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "semanticVersion")); + } + @Override public long getId() { return id; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java index 93e1ae2810a6..86247b53d32e 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java @@ -152,7 +152,7 @@ private List filterKubernetesSupportedVersions(Li versions.remove(i); } } catch (IllegalArgumentException e) { - logger.warn(String.format("Unable to compare Kubernetes version for supported version ID: %s with %s", version.getUuid(), minimumSemanticVersion)); + logger.warn("Unable to compare Kubernetes version for supported version {} with {}", version, minimumSemanticVersion); versions.remove(i); } } @@ -383,13 +383,13 @@ public boolean deleteKubernetesSupportedVersion(final DeleteKubernetesSupportedV VMTemplateVO template = templateDao.findByIdIncludingRemoved(version.getIsoId()); if (template == null) { - logger.warn(String.format("Unable to find ISO associated with supported Kubernetes version ID: %s", version.getUuid())); + logger.warn("Unable to find ISO associated with supported Kubernetes version {}", version); } if (template != null && template.getRemoved() == null) { // Delete ISO try { deleteKubernetesVersionIso(template.getId()); } catch (IllegalAccessException | NoSuchFieldException | IllegalArgumentException ex) { - logger.error(String.format("Unable to delete binaries ISO ID: %s associated with supported kubernetes version ID: %s", template.getUuid(), version.getUuid()), ex); + logger.error("Unable to delete binaries ISO: {} associated with supported 
kubernetes version: {}", template, version, ex); throw new CloudRuntimeException(String.format("Unable to delete binaries ISO ID: %s associated with supported kubernetes version ID: %s", template.getUuid(), version.getUuid())); } } diff --git a/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java b/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java index 6025a41d69cb..51524c129121 100644 --- a/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java +++ b/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java @@ -851,7 +851,7 @@ public List listManagementServerMetrics(List listBigSwitchBcfDevices(ListBigSwitchBcfDevice if (bigswitchBcfDeviceId != null) { BigSwitchBcfDeviceVO bigswitchBcfDevice = _bigswitchBcfDao.findById(bigswitchBcfDeviceId); if (bigswitchBcfDevice == null) { - throw new InvalidParameterValueException("Could not find BigSwitch controller with id: " + bigswitchBcfDevice); + throw new InvalidParameterValueException(String.format("Could not find BigSwitch controller with id: %d", bigswitchBcfDeviceId)); } responseList.add(bigswitchBcfDevice); } else { diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/guru/BigSwitchBcfGuestNetworkGuru.java b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/guru/BigSwitchBcfGuestNetworkGuru.java index f9c11e507483..cefae2dcadde 100644 --- a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/guru/BigSwitchBcfGuestNetworkGuru.java +++ b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/guru/BigSwitchBcfGuestNetworkGuru.java @@ -153,12 +153,11 @@ public Network design(NetworkOffering offering, DeploymentPlan plan, Network use List devices = _bigswitchBcfDao.listByPhysicalNetwork(physnet.getId()); if (devices.isEmpty()) { - logger.error("No BigSwitch Controller on physical network " + physnet.getName()); + 
logger.error(String.format("No BigSwitch Controller on physical network %s", physnet)); return null; } for (BigSwitchBcfDeviceVO d: devices){ - logger.debug("BigSwitch Controller " + d.getUuid() - + " found on physical network " + physnet.getId()); + logger.debug(String.format("BigSwitch Controller %s found on physical network %s", d, physnet)); } logger.debug("Physical isolation type is BCF_SEGMENT, asking GuestNetworkGuru to design this network"); @@ -309,7 +308,7 @@ public boolean release(NicProfile nic, VirtualMachineProfile vm, String reservat public void shutdown(NetworkProfile profile, NetworkOffering offering) { NetworkVO networkObject = _networkDao.findById(profile.getId()); if (networkObject.getBroadcastDomainType() != BroadcastDomainType.Vlan || networkObject.getBroadcastUri() == null) { - logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); + logger.warn(String.format("BroadcastUri is empty or incorrect for guest network %s", networkObject)); return; } @@ -353,8 +352,7 @@ public boolean prepareMigration(NicProfile nic, Network network, tenantId = vpc.getUuid(); tenantName = vpc.getName(); boolean released = _vpcDao.releaseFromLockTable(vpc.getId()); - logger.debug("BCF guru release lock vpc id: " + vpc.getId() - + " released? " + released); + logger.debug(String.format("BCF guru release lock vpc: %s released? 
%s", vpc, released)); } else { // use network id in CS as tenant in BSN // use network uuid as tenant id for non-VPC networks diff --git a/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/element/BrocadeVcsElement.java b/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/element/BrocadeVcsElement.java index daf9c1c4e08d..9c06efc6e402 100644 --- a/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/element/BrocadeVcsElement.java +++ b/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/element/BrocadeVcsElement.java @@ -136,18 +136,18 @@ public Provider getProvider() { } protected boolean canHandle(Network network, Service service) { - logger.debug("Checking if BrocadeVcsElement can handle service " + service.getName() + " on network " + network.getDisplayText()); + logger.debug(String.format("Checking if BrocadeVcsElement can handle service %s on network %s", service.getName(), network)); if (network.getBroadcastDomainType() != BroadcastDomainType.Vcs) { return false; } if (!_networkModel.isProviderForNetwork(getProvider(), network.getId())) { - logger.debug("BrocadeVcsElement is not a provider for network " + network.getDisplayText()); + logger.debug(String.format("BrocadeVcsElement is not a provider for network %s", network)); return false; } if (!_ntwkSrvcDao.canProviderSupportServiceInNetwork(network.getId(), service, Network.Provider.BrocadeVcs)) { - logger.debug("BrocadeVcsElement can't provide the " + service.getName() + " service on network " + network.getDisplayText()); + logger.debug(String.format("BrocadeVcsElement can't provide the %s service on network %s", service.getName(), network)); return false; } @@ -164,7 +164,7 @@ public boolean configure(String name, Map params) throws Configu @Override public boolean implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, 
InsufficientCapacityException { - logger.debug("entering BrocadeVcsElement implement function for network " + network.getDisplayText() + " (state " + network.getState() + ")"); + logger.debug(String.format("entering BrocadeVcsElement implement function for network %s (state %s)", network, network.getState())); if (!canHandle(network, Service.Connectivity)) { return false; @@ -276,11 +276,9 @@ public BrocadeVcsDeviceVO addBrocadeVcsDevice(AddBrocadeVcsDeviceCmd cmd) { final PhysicalNetworkServiceProviderVO ntwkSvcProvider = _physicalNetworkServiceProviderDao.findByServiceProvider(physicalNetwork.getId(), networkDevice.getNetworkServiceProvder()); if (ntwkSvcProvider == null) { - throw new CloudRuntimeException("Network Service Provider: " + networkDevice.getNetworkServiceProvder() + " is not enabled in the physical network: " - + physicalNetworkId + "to add this device"); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is not enabled in the physical network: %s to add this device", networkDevice.getNetworkServiceProvder(), physicalNetwork)); } else if (ntwkSvcProvider.getState() == PhysicalNetworkServiceProvider.State.Shutdown) { - throw new CloudRuntimeException("Network Service Provider: " + ntwkSvcProvider.getProviderName() + " is in shutdown state in the physical network: " - + physicalNetworkId + "to add this device"); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is in shutdown state in the physical network: %s to add this device", ntwkSvcProvider.getProviderName(), physicalNetwork)); } Map params = new HashMap(); diff --git a/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/guru/BrocadeVcsGuestNetworkGuru.java b/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/guru/BrocadeVcsGuestNetworkGuru.java index 8d2125d70eb0..4fe75ba7519c 100644 --- a/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/guru/BrocadeVcsGuestNetworkGuru.java +++ 
b/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/guru/BrocadeVcsGuestNetworkGuru.java @@ -142,7 +142,7 @@ public Network implement(Network network, NetworkOffering offering, DeployDestin if (answer == null || !answer.getResult()) { logger.error("CreateNetworkCommand failed"); - logger.error("Unable to create network " + network.getId()); + logger.error(String.format("Unable to create network %s", network)); return null; } @@ -179,7 +179,7 @@ public void reserve(NicProfile nic, Network network, VirtualMachineProfile vm, D if (answer == null || !answer.getResult()) { logger.error("AssociateMacToNetworkCommand failed"); - throw new InsufficientVirtualNetworkCapacityException("Unable to associate mac " + interfaceMac + " to network " + network.getId(), DataCenter.class, dc.getId()); + throw new InsufficientVirtualNetworkCapacityException(String.format("Unable to associate mac %s to network %s", interfaceMac, network), DataCenter.class, dc.getId()); } } @@ -204,7 +204,7 @@ public void deallocate(Network network, NicProfile nic, VirtualMachineProfile vm if (answer == null || !answer.getResult()) { logger.error("DisassociateMacFromNetworkCommand failed"); - logger.error("Unable to disassociate mac " + interfaceMac + " from network " + network.getId()); + logger.error(String.format("Unable to disassociate mac %s from network %s", interfaceMac, network)); return; } } @@ -232,7 +232,7 @@ public boolean trash(Network network, NetworkOffering offering) { if (brocadeVcsNetworkVlanMapping != null) { vlanTag = brocadeVcsNetworkVlanMapping.getVlanId(); } else { - logger.error("Not able to find vlanId for network " + network.getId()); + logger.error(String.format("Not able to find vlanId for network %s", network)); return false; } @@ -250,7 +250,7 @@ public boolean trash(Network network, NetworkOffering offering) { if (answer == null || !answer.getResult()) { logger.error("DeleteNetworkCommand failed"); - logger.error("Unable to delete network " + 
network.getId()); + logger.error(String.format("Unable to delete network %s", network)); return false; } } diff --git a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/cisco/CiscoVnmcControllerVO.java b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/cisco/CiscoVnmcControllerVO.java index 0f843d7f4d13..ea2cf19715b2 100644 --- a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/cisco/CiscoVnmcControllerVO.java +++ b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/cisco/CiscoVnmcControllerVO.java @@ -16,6 +16,8 @@ // under the License. package com.cloud.network.cisco; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; + import java.util.UUID; import javax.persistence.Column; @@ -62,6 +64,13 @@ public CiscoVnmcControllerVO(long hostId, long physicalNetworkId, String provide this.uuid = UUID.randomUUID().toString(); } + @Override + public String toString() { + return String.format("CiscoVnmcController %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "name", "uuid")); + } + @Override public long getId() { return id; diff --git a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/element/CiscoVnmcElement.java b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/element/CiscoVnmcElement.java index 9b96bd4138d6..2ec05f679add 100644 --- a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/element/CiscoVnmcElement.java +++ b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/element/CiscoVnmcElement.java @@ -513,11 +513,11 @@ public CiscoVnmcController addCiscoVnmcResource(AddCiscoVnmcResourceCmd cmd) { final PhysicalNetworkServiceProviderVO ntwkSvcProvider = _physicalNetworkServiceProviderDao.findByServiceProvider(physicalNetwork.getId(), networkDevice.getNetworkServiceProvder()); if (ntwkSvcProvider == null) { - throw new CloudRuntimeException("Network
Service Provider: " + networkDevice.getNetworkServiceProvder() + " is not enabled in the physical network: " + - physicalNetworkId + "to add this device"); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is not enabled in the physical network: %s to add this device", + networkDevice.getNetworkServiceProvder(), physicalNetwork)); } else if (ntwkSvcProvider.getState() == PhysicalNetworkServiceProvider.State.Shutdown) { - throw new CloudRuntimeException("Network Service Provider: " + ntwkSvcProvider.getProviderName() + " is in shutdown state in the physical network: " + - physicalNetworkId + "to add this device"); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is in shutdown state in the physical network: %s to add this device", + ntwkSvcProvider.getProviderName(), physicalNetwork)); } if (_ciscoVnmcDao.listByPhysicalNetwork(physicalNetworkId).size() != 0) { @@ -589,7 +589,7 @@ public boolean deleteCiscoVnmcResource(DeleteCiscoVnmcResourceCmd cmd) { if (physicalNetwork != null) { List responseList = _ciscoAsa1000vDao.listByPhysicalNetwork(physicalNetworkId); if (responseList.size() > 0) { - throw new CloudRuntimeException("Cisco VNMC appliance with id " + vnmcResourceId + " cannot be deleted as there Cisco ASA 1000v appliances using it"); + throw new CloudRuntimeException(String.format("Cisco VNMC appliance %s cannot be deleted as there are Cisco ASA 1000v appliances using it", vnmcResource)); } } @@ -616,7 +616,7 @@ public List listCiscoVnmcResources(ListCiscoVnmcResources if (ciscoVnmcResourceId != null) { CiscoVnmcControllerVO ciscoVnmcResource = _ciscoVnmcDao.findById(ciscoVnmcResourceId); if (ciscoVnmcResource == null) { - throw new InvalidParameterValueException("Could not find Cisco Vnmc device with id: " + ciscoVnmcResource); + throw new InvalidParameterValueException(String.format("Could not find Cisco Vnmc device with id: %d", ciscoVnmcResourceId)); } responseList.add(ciscoVnmcResource); } else { @@ 
-874,7 +874,7 @@ public boolean deleteCiscoAsa1000vResource(DeleteCiscoAsa1000vResourceCmd cmd) { NetworkAsa1000vMapVO networkAsaMap = _networkAsa1000vMapDao.findByAsa1000vId(asaResource.getId()); if (networkAsaMap != null) { - throw new CloudRuntimeException("Cisco ASA 1000v appliance with id " + asaResourceId + " cannot be deleted as it is associated with guest network"); + throw new CloudRuntimeException(String.format("Cisco ASA 1000v appliance %s cannot be deleted as it is associated with guest network", asaResource)); } _ciscoAsa1000vDao.remove(asaResourceId); diff --git a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/element/ElasticLoadBalancerElement.java b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/element/ElasticLoadBalancerElement.java index c1ea7823811b..12269bf7cf7f 100644 --- a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/element/ElasticLoadBalancerElement.java +++ b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/element/ElasticLoadBalancerElement.java @@ -72,7 +72,7 @@ public class ElasticLoadBalancerElement extends AdapterBase implements LoadBalan private boolean canHandle(Network network, List rules) { if (network.getGuestType() != Network.GuestType.Shared || network.getTrafficType() != TrafficType.Guest) { - logger.debug("Not handling network with type " + network.getGuestType() + " and traffic type " + network.getTrafficType()); + logger.debug(String.format("Not handling network %s with type %s and traffic type %s", network, network.getGuestType(), network.getTrafficType())); return false; } diff --git a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java index c7f4b8bf2443..6c7b88afd896 100644 --- 
a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java +++ b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java @@ -247,7 +247,7 @@ public boolean applyLoadBalancerRules(Network network, List r DomainRouterVO elbVm = findElbVmForLb(rules.get(0)); if (elbVm == null) { - logger.warn("Unable to apply lb rules, ELB vm doesn't exist in the network " + network.getId()); + logger.warn(String.format("Unable to apply lb rules, ELB vm doesn't exist in the network %s", network)); throw new ResourceUnavailableException("Unable to apply lb rules", DataCenter.class, network.getDataCenterId()); } @@ -267,10 +267,10 @@ public boolean applyLoadBalancerRules(Network network, List r } return applyLBRules(elbVm, lbRules, network.getId()); } else if (elbVm.getState() == State.Stopped || elbVm.getState() == State.Stopping) { - logger.debug("ELB VM is in " + elbVm.getState() + ", so not sending apply LoadBalancing rules commands to the backend"); + logger.debug(String.format("ELB VM %s is in %s, so not sending apply LoadBalancing rules commands to the backend", elbVm, elbVm.getState())); return true; } else { - logger.warn("Unable to apply loadbalancing rules, ELB VM is not in the right state " + elbVm.getState()); + logger.warn(String.format("Unable to apply loadbalancing rules, ELB VM %s is not in the right state %s", elbVm, elbVm.getState())); throw new ResourceUnavailableException("Unable to apply loadbalancing rules, ELB VM is not in the right state", VirtualRouter.class, elbVm.getId()); } } diff --git a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/LoadBalanceRuleHandler.java b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/LoadBalanceRuleHandler.java index 714855934656..45afa58a1a07 100644 --- 
a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/LoadBalanceRuleHandler.java b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/LoadBalanceRuleHandler.java @@ -27,6 +27,7 @@ import javax.inject.Inject; +import com.cloud.network.dao.PhysicalNetworkDao; import org.apache.cloudstack.api.command.user.loadbalancer.CreateLoadBalancerRuleCmd; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; @@ -124,6 +125,8 @@ public class LoadBalanceRuleHandler { @Inject private IpAddressManager _ipAddrMgr; @Inject + private PhysicalNetworkDao physicalNetworkDao; + @Inject protected NetworkDao _networkDao; @Inject protected NetworkOfferingDao _networkOfferingDao; @@ -230,7 +233,7 @@ private DomainRouterVO deployELBVm(Network guestNetwork, final DeployDestination guestNetwork = _networkDao.acquireInLockTable(guestNetworkId); if (guestNetwork == null) { - throw new ConcurrentOperationException("Unable to acquire network lock: " + guestNetworkId); + throw new ConcurrentOperationException(String.format("Unable to acquire network lock: %s", guestNetworkId)); } try { @@ -272,11 +275,12 @@ private DomainRouterVO deployELBVm(Network guestNetwork, final DeployDestination final Long physicalNetworkId = _networkModel.getPhysicalNetworkId(guestNetwork); final PhysicalNetworkServiceProvider provider = _physicalProviderDao.findByServiceProvider(physicalNetworkId, typeString); if (provider == null) { - throw new CloudRuntimeException("Cannot find service provider " + typeString + " in physical network " + physicalNetworkId); + throw new CloudRuntimeException(String.format("Cannot find service provider %s in physical network %s with id %d", + typeString, physicalNetworkDao.findById(physicalNetworkId), physicalNetworkId)); } final VirtualRouterProvider vrProvider = _vrProviderDao.findByNspIdAndType(provider.getId(), Type.ElasticLoadBalancerVm); if 
(vrProvider == null) { - throw new CloudRuntimeException("Cannot find virtual router provider " + typeString + " as service provider " + provider.getId()); + throw new CloudRuntimeException(String.format("Cannot find virtual router provider %s as service provider %s", typeString, provider)); } long userId = CallContext.current().getCallingUserId(); diff --git a/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java b/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java index 0a9b4a7131a3..27a06899e014 100644 --- a/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java +++ b/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java @@ -347,8 +347,7 @@ public boolean applyLBRules(Network network, List rules) thro //2.3 Apply Internal LB rules on the VM if (!_internalLbMgr.applyLoadBalancingRules(network, entry.getValue(), internalLbVms)) { - throw new CloudRuntimeException("Failed to apply load balancing rules for ip " + sourceIp.addr() + - " in network " + network.getId() + " on element " + getName()); + throw new CloudRuntimeException(String.format("Failed to apply load balancing rules for ip %s in network %s on element %s", sourceIp.addr(), network, getName())); } } diff --git a/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java b/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java index 9a5c5a7c6a99..9469af7eb750 100644 --- a/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java +++ 
b/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java @@ -300,7 +300,7 @@ public boolean finalizeStart(final VirtualMachineProfile profile, final long hos if (answer != null && answer instanceof GetDomRVersionAnswer) { final GetDomRVersionAnswer versionAnswer = (GetDomRVersionAnswer)answer; if (answer == null || !answer.getResult()) { - logger.warn("Unable to get the template/scripts version of internal LB VM " + internalLbVm.getInstanceName() + " due to: " + versionAnswer.getDetails()); + logger.warn(String.format("Unable to get the template/scripts version of internal LB VM %s due to: %s", internalLbVm, versionAnswer.getDetails())); result = false; } else { internalLbVm.setTemplateVersion(versionAnswer.getTemplateVersion()); @@ -606,11 +606,11 @@ protected List findOrDeployInternalLbVm(final Network guestNetwo List internalLbVms = new ArrayList(); final Network lock = _networkDao.acquireInLockTable(guestNetwork.getId(), NetworkOrchestrationService.NetworkLockTimeout.value()); if (lock == null) { - throw new ConcurrentOperationException("Unable to lock network " + guestNetwork.getId()); + throw new ConcurrentOperationException(String.format("Unable to lock network %s", guestNetwork)); } if (logger.isDebugEnabled()) { - logger.debug("Lock is acquired for network id " + lock.getId() + " as a part of internal lb startup in " + dest); + logger.debug(String.format("Lock is acquired for network %s as a part of internal lb startup in %s", lock, dest)); } final long internalLbProviderId = getInternalLbProviderId(guestNetwork); @@ -647,7 +647,7 @@ protected List findOrDeployInternalLbVm(final Network guestNetwo if (lock != null) { _networkDao.releaseFromLockTable(lock.getId()); if (logger.isDebugEnabled()) { - logger.debug("Lock is released for network id " + lock.getId() + " as a part of internal lb vm startup in " + dest); + logger.debug(String.format("Lock is released for network id %s as a 
part of internal lb vm startup in %s", lock, dest)); } } } @@ -665,7 +665,7 @@ protected long getInternalLbProviderId(final Network guestNetwork) { final VirtualRouterProvider internalLbProvider = _vrProviderDao.findByNspIdAndType(provider.getId(), type); if (internalLbProvider == null) { - throw new CloudRuntimeException("Cannot find provider " + type.toString() + " as service provider " + provider.getId()); + throw new CloudRuntimeException(String.format("Cannot find provider %s as service provider %s", type.toString(), provider)); } return internalLbProvider.getId(); @@ -880,10 +880,10 @@ public boolean applyLoadBalancingRules(final Network network, final List> getCapabilities() { @Override public boolean implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - logger.debug("NetworkElement implement: " + network.getName() + ", traffic type: " + network.getTrafficType()); + logger.debug(String.format("NetworkElement implement: %s, traffic type: %s", network, network.getTrafficType())); if (network.getTrafficType() == TrafficType.Guest) { - logger.debug("ignore network " + network.getName()); + logger.debug(String.format("ignore network %s", network)); return true; } VirtualNetworkModel vnModel = _manager.getDatabase().lookupVirtualNetwork(network.getUuid(), _manager.getCanonicalName(network), network.getTrafficType()); @@ -145,15 +145,13 @@ public boolean implement(Network network, NetworkOffering offering, DeployDestin public boolean prepare(Network network, NicProfile nicProfile, VirtualMachineProfile vm, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - logger.debug("NetworkElement prepare: " + network.getName() + ", traffic type: " + network.getTrafficType()); + logger.debug(String.format("NetworkElement 
prepare: %s, traffic type: %s", network, network.getTrafficType())); if (network.getTrafficType() == TrafficType.Guest) { - logger.debug("ignore network " + network.getName()); + logger.debug(String.format("ignore network %s", network)); return true; } - logger.debug("network: " + network.getId()); - VirtualNetworkModel vnModel = _manager.getDatabase().lookupVirtualNetwork(network.getUuid(), _manager.getCanonicalName(network), network.getTrafficType()); if (vnModel == null) { @@ -210,7 +208,7 @@ public boolean release(Network network, NicProfile nicProfile, VirtualMachinePro if (network.getTrafficType() == TrafficType.Guest) { return true; } else if (!_manager.isManagedPhysicalNetwork(network)) { - logger.debug("release ignore network " + network.getId()); + logger.debug(String.format("release ignore network %s", network)); return true; } @@ -219,7 +217,7 @@ public boolean release(Network network, NicProfile nicProfile, VirtualMachinePro VirtualMachineModel vmModel = _manager.getDatabase().lookupVirtualMachine(vm.getUuid()); if (vmModel == null) { - logger.debug("vm " + vm.getInstanceName() + " not in local database"); + logger.debug(String.format("vm %s not in local database", vm)); return true; } VMInterfaceModel vmiModel = vmModel.getVMInterface(nic.getUuid()); @@ -272,7 +270,7 @@ public boolean isReady(PhysicalNetworkServiceProvider provider) { List systemNets = _manager.findSystemNetworks(types); if (systemNets != null && !systemNets.isEmpty()) { for (NetworkVO net: systemNets) { - logger.debug("update system network service: " + net.getName() + "; service provider: " + serviceMap); + logger.debug(String.format("update system network service: %s; service provider: %s", net, serviceMap)); _networksDao.update(net.getId(), net, serviceMap); } } else { @@ -284,7 +282,7 @@ public boolean isReady(PhysicalNetworkServiceProvider provider) { systemNets = _manager.findSystemNetworks(types); if (systemNets != null && !systemNets.isEmpty()) { for (NetworkVO net: 
systemNets) { - logger.debug("update system network service: " + net.getName() + "; service provider: " + serviceMap); + logger.debug(String.format("update system network service: %s; service provider: %s", net, serviceMap)); _networksDao.update(net.getId(), net, serviceMap); } } else { diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailGuru.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailGuru.java index 345cdc1e6c6c..9346abab21da 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailGuru.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailGuru.java @@ -132,7 +132,7 @@ public Network design(NetworkOffering offering, DeploymentPlan plan, Network use network.setCidr(userSpecified.getCidr()); network.setGateway(userSpecified.getGateway()); } - logger.debug("Allocated network " + userSpecified.getName() + (network.getCidr() == null ? "" : " subnet: " + network.getCidr())); + logger.debug("Allocated network {}{}", userSpecified, network.getCidr() == null ? 
"" : " subnet: " + network.getCidr()); return network; } @@ -144,7 +144,7 @@ public void setup(Network network, long networkId) { @Override public Network implement(Network network, NetworkOffering offering, DeployDestination destination, ReservationContext context) throws InsufficientVirtualNetworkCapacityException { - logger.debug("Implement network: " + network.getName() + ", traffic type: " + network.getTrafficType()); + logger.debug("Implement network: {}, traffic type: {}", network, network.getTrafficType()); VirtualNetworkModel vnModel = _manager.getDatabase().lookupVirtualNetwork(network.getUuid(), _manager.getCanonicalName(network), network.getTrafficType()); if (vnModel == null) { @@ -191,7 +191,7 @@ public Network implement(Network network, NetworkOffering offering, DeployDestin @Override public NicProfile allocate(Network network, NicProfile profile, VirtualMachineProfile vm) throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException, ConcurrentOperationException { - logger.debug("allocate NicProfile on " + network.getName()); + logger.debug(String.format("allocate NicProfile on %s", network)); if (profile != null && profile.getRequestedIPv4() != null) { throw new CloudRuntimeException("Does not support custom ip allocation at this time: " + profile); @@ -218,8 +218,8 @@ public NicProfile allocate(Network network, NicProfile profile, VirtualMachinePr @Override public void reserve(NicProfile nic, Network network, VirtualMachineProfile vm, DeployDestination dest, ReservationContext context) throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException, ConcurrentOperationException { - logger.debug("reserve NicProfile on network id: " + network.getId() + " " + network.getName()); - logger.debug("deviceId: " + nic.getDeviceId()); + logger.debug("reserve NicProfile on network: " + network); + logger.debug(String.format("nic: %s deviceId: %d", nic, nic.getDeviceId())); NicVO nicVO = 
_nicDao.findById(nic.getId()); assert nicVO != null; @@ -275,7 +275,7 @@ public void reserve(NicProfile nic, Network network, VirtualMachineProfile vm, D if (nic.getMacAddress() == null) { MacAddressesType macs = vmi.getMacAddresses(); if (macs == null) { - logger.debug("no mac address is allocated for Nic " + nicVO.getUuid()); + logger.debug(String.format("no mac address is allocated for Nic %s", nicVO)); } else { logger.info("VMI " + _manager.getVifNameByVmUuid(vm.getUuid(), nicVO.getDeviceId()) + " got mac address: " + macs.getMacAddress().get(0)); nic.setMacAddress(macs.getMacAddress().get(0)); @@ -299,7 +299,7 @@ public void reserve(NicProfile nic, Network network, VirtualMachineProfile vm, D @Override public boolean release(NicProfile nic, VirtualMachineProfile vm, String reservationId) { - logger.debug("release NicProfile " + nic.getId()); + logger.debug(String.format("release NicProfile %s", nic)); return true; } @@ -309,7 +309,7 @@ public boolean release(NicProfile nic, VirtualMachineProfile vm, String reservat */ @Override public void deallocate(Network network, NicProfile nic, VirtualMachineProfile vm) { - logger.debug("deallocate NicProfile " + nic.getId() + " on " + network.getName()); + logger.debug(String.format("deallocate NicProfile %s on %s", nic, network.getName())); NicVO nicVO = _nicDao.findById(nic.getId()); assert nicVO != null; @@ -343,7 +343,7 @@ public void deallocate(Network network, NicProfile nic, VirtualMachineProfile vm @Override public void updateNicProfile(NicProfile profile, Network network) { // TODO Auto-generated method stub - logger.debug("update NicProfile " + profile.getId() + " on " + network.getName()); + logger.debug(String.format("update NicProfile %s on %s", profile, network)); } @Override diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ManagementNetworkGuru.java 
b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ManagementNetworkGuru.java index dc453f75c150..29d0b3a51e8e 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ManagementNetworkGuru.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ManagementNetworkGuru.java @@ -121,7 +121,7 @@ public Network design(NetworkOffering offering, DeploymentPlan plan, Network use network.setCidr(_mgmtCidr); network.setGateway(_mgmtGateway); } - logger.debug("Allocated network " + userSpecified.getName() + (network.getCidr() == null ? "" : " subnet: " + network.getCidr())); + logger.debug("Allocated network " + userSpecified + (network.getCidr() == null ? "" : " subnet: " + network.getCidr())); return network; } diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VirtualMachineModel.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VirtualMachineModel.java index 479ef2a0e5db..7a1833302d69 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VirtualMachineModel.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VirtualMachineModel.java @@ -79,7 +79,7 @@ public void build(ModelController controller, VMInstanceVO instance) { setProperties(controller, instance); UserVm userVm = controller.getVmDao().findById(instance.getId()); if (userVm != null && userVm.getUserData() != null) { - logger.debug("vm " + instance.getInstanceName() + " user data: " + userVm.getUserData()); + logger.debug(String.format("vm %s user data: %s", instance, userVm.getUserData())); final Gson json = new Gson(); Map kvmap = json.fromJson(userVm.getUserData(), new TypeToken>() { }.getType()); 
@@ -99,7 +99,7 @@ public void build(ModelController controller, VMInstanceVO instance) { } else { // Throw a CloudRuntimeException in case the UUID is not valid. String message = "Invalid UUID ({0}) given for the service-instance for VM {1}."; - message = MessageFormat.format(message, instance.getId(), serviceUuid); + message = MessageFormat.format(message, serviceUuid, instance); logger.warn(message); throw new CloudRuntimeException(message); } diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VirtualNetworkModel.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VirtualNetworkModel.java index 08a4609c43ef..c27c57516e0c 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VirtualNetworkModel.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VirtualNetworkModel.java @@ -413,7 +413,8 @@ public boolean verify(ModelController controller) { diff.removeAll(vncSubnets); if (!diff.isEmpty()) { - logger.debug("Subnets changed, network: " + _name + "; db: " + dbSubnets + ", vnc: " + vncSubnets + ", diff: " + diff); + logger.debug(String.format("Subnets changed, network: [id: %d, uuid: %s, name: %s]; " + + "db: %s, vnc: %s, diff: %s", _id, _uuid, _name, dbSubnets, vncSubnets, diff)); return false; } @@ -500,7 +501,7 @@ public boolean compare(ModelController controller, ModelObject o) { diff.removeAll(newSubnets); if (!diff.isEmpty()) { - logger.debug("Subnets differ, network: " + _name + "; db: " + currentSubnets + ", vnc: " + newSubnets + ", diff: " + diff); + logger.debug(String.format("Subnets differ, network: [id: %d, uuid: %s, name: %s]; db: %s, vnc: %s, diff: %s", _id, _uuid, _name, currentSubnets, newSubnets, diff)); return false; } diff --git 
a/plugins/network-elements/netscaler/src/main/java/com/cloud/network/element/NetscalerElement.java b/plugins/network-elements/netscaler/src/main/java/com/cloud/network/element/NetscalerElement.java index 48b9006f34c1..b693e06ad8f0 100644 --- a/plugins/network-elements/netscaler/src/main/java/com/cloud/network/element/NetscalerElement.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/network/element/NetscalerElement.java @@ -327,16 +327,14 @@ public boolean manageGuestNetworkWithNetscalerControlCenter(boolean add, Network // allocate a load balancer device for the network lbDeviceVO = allocateNCCResourceForNetwork(guestConfig); if (lbDeviceVO == null) { - String msg = "failed to allocate Netscaler ControlCenter Resource for the zone in the network " - + guestConfig.getId(); + String msg = String.format("failed to allocate Netscaler ControlCenter Resource for the zone in the network %s", guestConfig); logger.error(msg); throw new InsufficientNetworkCapacityException(msg, DataCenter.class, guestConfig.getDataCenterId()); } } netscalerControlCenter = _hostDao.findById(lbDeviceVO.getId()); - logger.debug("Allocated Netscaler Control Center device:" + lbDeviceVO.getId() + " for the network: " - + guestConfig.getId()); + logger.debug(String.format("Allocated Netscaler Control Center device:%s for the network: %s", lbDeviceVO, guestConfig)); } else { // find the load balancer device allocated for the network diff --git a/plugins/network-elements/netscaler/src/main/java/com/cloud/network/vm/NetScalerVMManagerImpl.java b/plugins/network-elements/netscaler/src/main/java/com/cloud/network/vm/NetScalerVMManagerImpl.java index 7b2ef012bed6..299341161491 100644 --- a/plugins/network-elements/netscaler/src/main/java/com/cloud/network/vm/NetScalerVMManagerImpl.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/network/vm/NetScalerVMManagerImpl.java @@ -269,13 +269,13 @@ public Map deployNsVpx(Account owner, DeployDestination dest, De 
ServiceOfferingVO vpxOffering = _serviceOfferingDao.findById(svcOffId); //using 2GB and 2CPU offering if(vpxOffering.getRamSize() < 2048 && vpxOffering.getCpu() <2 ) { - throw new InvalidParameterValueException("Specified Service Offering :" + vpxOffering.getUuid() + " NS Vpx cannot be deployed. Min 2GB Ram and 2 CPU are required"); + throw new InvalidParameterValueException(String.format("Specified Service Offering :%s NS Vpx cannot be deployed. Min 2GB Ram and 2 CPU are required", vpxOffering)); } long userId = CallContext.current().getCallingUserId(); //TODO change the os bits from 142 103 to the actual guest of bits if(template.getGuestOSId() != 103 ) { - throw new InvalidParameterValueException("Specified Template " + template.getUuid()+ " not suitable for NS VPX Deployment. Please register the template with guest os type as unknow(64-bit)"); + throw new InvalidParameterValueException(String.format("Specified Template %s not suitable for NS VPX Deployment. Please register the template with guest os type as unknown(64-bit)", template)); } NetworkVO defaultNetwork = null; diff --git a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/element/NiciraNvpElement.java b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/element/NiciraNvpElement.java index 356b452a9e48..278d058a2892 100644 --- a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/element/NiciraNvpElement.java +++ b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/element/NiciraNvpElement.java @@ -187,18 +187,18 @@ public Provider getProvider() { } protected boolean canHandle(Network network, Service service) { - logger.debug("Checking if NiciraNvpElement can handle service " + service.getName() + " on network " + network.getDisplayText()); + logger.debug(String.format("Checking if NiciraNvpElement can handle service %s on network %s", service.getName(), network)); if (network.getBroadcastDomainType() != BroadcastDomainType.Lswitch) 
{ return false; } if (!networkModel.isProviderForNetwork(getProvider(), network.getId())) { - logger.debug("NiciraNvpElement is not a provider for network " + network.getDisplayText()); + logger.debug(String.format("NiciraNvpElement is not a provider for network %s", network)); return false; } if (!ntwkSrvcDao.canProviderSupportServiceInNetwork(network.getId(), service, Network.Provider.NiciraNvp)) { - logger.debug("NiciraNvpElement can't provide the " + service.getName() + " service on network " + network.getDisplayText()); + logger.debug(String.format("NiciraNvpElement can't provide the %s service on network %s", service.getName(), network)); return false; } @@ -215,7 +215,7 @@ public boolean configure(String name, Map params) throws Configu @Override public boolean implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - logger.debug("entering NiciraNvpElement implement function for network " + network.getDisplayText() + " (state " + network.getState() + ")"); + logger.debug(String.format("entering NiciraNvpElement implement function for network %s (state %s)", network, network.getState())); if (!canHandle(network, Service.Connectivity)) { return false; @@ -276,7 +276,7 @@ else if (network.getGuestType().equals(GuestType.Isolated) && networkModel.isPro context.getAccount().getAccountName()); CreateLogicalRouterAnswer answer = (CreateLogicalRouterAnswer)agentMgr.easySend(niciraNvpHost.getId(), cmd); if (answer.getResult() == false) { - logger.error("Failed to create Logical Router for network " + network.getDisplayText()); + logger.error(String.format("Failed to create Logical Router for network %s", network)); return false; } @@ -313,7 +313,7 @@ private boolean sharedNetworkSupportUUIDVlanId(Network network, String lSwitchUu new ConfigureSharedNetworkUuidCommand(lRouterUuid, lSwitchUuid, portIpAddress, ownerName, 
network.getId()); ConfigureSharedNetworkUuidAnswer answer = (ConfigureSharedNetworkUuidAnswer)agentMgr.easySend(niciraNvpHost.getId(), cmd); if (answer.getResult() == false) { - logger.error("Failed to configure Logical Router for Shared network " + network.getDisplayText()); + logger.error(String.format("Failed to configure Logical Router for Shared network %s", network)); return false; } return true; @@ -332,7 +332,7 @@ private boolean sharedNetworkSupportNumericalVlanId(Network network, String lSwi new ConfigureSharedNetworkVlanIdCommand(lSwitchUuid, l2GatewayServiceUuid , vlanId, ownerName, network.getId()); ConfigureSharedNetworkVlanIdAnswer answer = (ConfigureSharedNetworkVlanIdAnswer)agentMgr.easySend(niciraNvpHost.getId(), cmd); if (answer.getResult() == false) { - logger.error("Failed to configure Shared network " + network.getDisplayText()); + logger.error(String.format("Failed to configure Shared network %s", network)); return false; } } @@ -431,7 +431,7 @@ public boolean release(Network network, NicProfile nic, VirtualMachineProfile vm NiciraNvpNicMappingVO nicMap = niciraNvpNicMappingDao.findByNicUuid(nicVO.getUuid()); if (nicMap == null) { - logger.error("No mapping for nic " + nic.getName()); + logger.error(String.format("No mapping for nic %s", nic)); return false; } @@ -470,7 +470,7 @@ public boolean shutdown(Network network, ReservationContext context, boolean cle // nat rules. NiciraNvpRouterMappingVO routermapping = niciraNvpRouterMappingDao.findByNetworkId(network.getId()); if (routermapping == null) { - logger.warn("No logical router uuid found for network " + network.getDisplayText()); + logger.warn(String.format("No logical router uuid found for network %s", network)); // This might be cause by a failed deployment, so don't make shutdown fail as well. 
return true; } @@ -478,7 +478,7 @@ public boolean shutdown(Network network, ReservationContext context, boolean cle DeleteLogicalRouterCommand cmd = new DeleteLogicalRouterCommand(routermapping.getLogicalRouterUuid()); DeleteLogicalRouterAnswer answer = (DeleteLogicalRouterAnswer)agentMgr.easySend(niciraNvpHost.getId(), cmd); if (answer.getResult() == false) { - logger.error("Failed to delete LogicalRouter for network " + network.getDisplayText()); + logger.error(String.format("Failed to delete LogicalRouter for network %s", network)); return false; } @@ -582,11 +582,9 @@ public NiciraNvpDeviceVO addNiciraNvpDevice(AddNiciraNvpDeviceCmd cmd) { final PhysicalNetworkServiceProviderVO ntwkSvcProvider = physicalNetworkServiceProviderDao.findByServiceProvider(physicalNetwork.getId(), networkDevice.getNetworkServiceProvder()); if (ntwkSvcProvider == null) { - throw new CloudRuntimeException("Network Service Provider: " + networkDevice.getNetworkServiceProvder() + " is not enabled in the physical network: " + - physicalNetworkId + "to add this device"); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is not enabled in the physical network: %s to add this device", networkDevice.getNetworkServiceProvder(), physicalNetwork)); } else if (ntwkSvcProvider.getState() == PhysicalNetworkServiceProvider.State.Shutdown) { - throw new CloudRuntimeException("Network Service Provider: " + ntwkSvcProvider.getProviderName() + " is in shutdown state in the physical network: " + - physicalNetworkId + "to add this device"); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is in shutdown state in the physical network: %s to add this device", ntwkSvcProvider.getProviderName(), physicalNetwork)); } if (niciraNvpDao.listByPhysicalNetwork(physicalNetworkId).size() != 0) { @@ -814,7 +812,7 @@ public boolean applyIps(Network network, List ipAddre NiciraNvpRouterMappingVO routermapping = 
niciraNvpRouterMappingDao.findByNetworkId(network.getId()); if (routermapping == null) { - logger.error("No logical router uuid found for network " + network.getDisplayText()); + logger.error(String.format("No logical router uuid found for network %s", network)); return false; } @@ -858,7 +856,7 @@ public boolean applyStaticNats(Network network, List rules) NiciraNvpRouterMappingVO routermapping = niciraNvpRouterMappingDao.findByNetworkId(network.getId()); if (routermapping == null) { - logger.error("No logical router uuid found for network " + network.getDisplayText()); + logger.error(String.format("No logical router uuid found for network %s", network)); return false; } @@ -898,7 +896,7 @@ public boolean applyPFRules(Network network, List rules) thr NiciraNvpRouterMappingVO routermapping = niciraNvpRouterMappingDao.findByNetworkId(network.getId()); if (routermapping == null) { - logger.error("No logical router uuid found for network " + network.getDisplayText()); + logger.error(String.format("No logical router uuid found for network %s", network)); return false; } diff --git a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/guru/NiciraNvpGuestNetworkGuru.java b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/guru/NiciraNvpGuestNetworkGuru.java index daf2420b528c..d366169fcc7a 100644 --- a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/guru/NiciraNvpGuestNetworkGuru.java +++ b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/guru/NiciraNvpGuestNetworkGuru.java @@ -147,10 +147,10 @@ public Network design(final NetworkOffering offering, final DeploymentPlan plan, final List devices = niciraNvpDao.listByPhysicalNetwork(physnet.getId()); if (devices.isEmpty()) { - logger.error("No NiciraNvp Controller on physical network " + physnet.getName()); + logger.error("No NiciraNvp Controller on physical network {}", physnet); return null; } - logger.debug("Nicira Nvp " + devices.get(0).getUuid() + 
" found on physical network " + physnet.getId()); + logger.debug("Nicira Nvp {} found on physical network {}", devices.get(0).getUuid(), physnet); logger.debug("Physical isolation type is supported, asking GuestNetworkGuru to design this network"); final NetworkVO networkObject = (NetworkVO) super.design(offering, plan, userSpecified, name, vpcId, owner); @@ -276,7 +276,7 @@ public boolean release(final NicProfile nic, final VirtualMachineProfile vm, fin public void shutdown(final NetworkProfile profile, final NetworkOffering offering) { final NetworkVO networkObject = networkDao.findById(profile.getId()); if (networkObject.getBroadcastDomainType() != BroadcastDomainType.Lswitch || networkObject.getBroadcastUri() == null) { - logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); + logger.warn(String.format("BroadcastUri is empty or incorrect for guest network %s", networkObject)); return; } @@ -308,7 +308,7 @@ private void sharedNetworksCleanup(NetworkVO networkObject, String logicalSwitch NiciraNvpRouterMappingVO routermapping = niciraNvpRouterMappingDao.findByNetworkId(networkObject.getId()); if (routermapping == null) { // Case 1: Numerical Vlan Provided -> No lrouter used. - logger.info("Shared Network " + networkObject.getDisplayText() + " didn't use Logical Router"); + logger.info(String.format("Shared Network %s didn't use Logical Router", networkObject)); } else { //Case 2: Logical Router's UUID provided as Vlan id -> Remove lrouter port but not lrouter. 
diff --git a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightGuestNetworkGuru.java b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightGuestNetworkGuru.java index 7b4851fc285c..42aebf0182ac 100644 --- a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightGuestNetworkGuru.java +++ b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightGuestNetworkGuru.java @@ -107,10 +107,10 @@ public Network design(NetworkOffering offering, DeploymentPlan plan, Network use List devices = openDaylightControllerMappingDao.listByPhysicalNetwork(physnet.getId()); if (devices.isEmpty()) { - logger.error("No Controller on physical network " + physnet.getName()); + logger.error("No Controller on physical network {}", physnet); return null; } - logger.debug("Controller " + devices.get(0).getUuid() + " found on physical network " + physnet.getId()); + logger.debug("Controller {} found on physical network {}", devices.get(0).getUuid(), physnet); logger.debug("Physical isolation type is ODL, asking GuestNetworkGuru to design this network"); NetworkVO networkObject = (NetworkVO)super.design(offering, plan, userSpecified, name, vpcId, owner); @@ -194,7 +194,7 @@ public void reserve(NicProfile nic, Network network, VirtualMachineProfile vm, D AddHypervisorCommand addCmd = new AddHypervisorCommand(dest.getHost().getUuid(), dest.getHost().getPrivateIpAddress()); AddHypervisorAnswer addAnswer = (AddHypervisorAnswer)agentManager.easySend(controller.getHostId(), addCmd); if (addAnswer == null || !addAnswer.getResult()) { - logger.error("Failed to add " + dest.getHost().getName() + " as a node to the controller"); + logger.error(String.format("Failed to add %s as a node to the controller", dest.getHost())); throw new InsufficientVirtualNetworkCapacityException("Failed to add 
destination hypervisor to the OpenDaylight Controller", dest.getPod().getId()); } @@ -241,7 +241,7 @@ public boolean release(NicProfile nic, VirtualMachineProfile vm, String reservat public void shutdown(NetworkProfile profile, NetworkOffering offering) { NetworkVO networkObject = networkDao.findById(profile.getId()); if (networkObject.getBroadcastDomainType() != BroadcastDomainType.OpenDaylight || networkObject.getBroadcastUri() == null) { - logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); + logger.warn(String.format("BroadcastUri is empty or incorrect for guest network %s", networkObject)); return; } diff --git a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/agent/OpenDaylightControllerResourceManagerImpl.java b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/agent/OpenDaylightControllerResourceManagerImpl.java index 8bf68f0c289b..a1a1fd5ea6cc 100644 --- a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/agent/OpenDaylightControllerResourceManagerImpl.java +++ b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/agent/OpenDaylightControllerResourceManagerImpl.java @@ -106,11 +106,11 @@ public OpenDaylightControllerVO addController(AddOpenDaylightControllerCmd cmd) final PhysicalNetworkServiceProviderVO ntwkSvcProvider = physicalNetworkServiceProviderDao.findByServiceProvider(physicalNetwork.getId(), networkDevice.getNetworkServiceProvder()); if (ntwkSvcProvider == null) { - throw new CloudRuntimeException("Network Service Provider: " + networkDevice.getNetworkServiceProvder() + " is not enabled in the physical network: " - + physicalNetworkId + "to add this device"); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is not enabled in the physical network: %s to add this device", + 
networkDevice.getNetworkServiceProvder(), physicalNetwork)); } else if (ntwkSvcProvider.getState() == PhysicalNetworkServiceProvider.State.Shutdown) { - throw new CloudRuntimeException("Network Service Provider: " + ntwkSvcProvider.getProviderName() + " is in shutdown state in the physical network: " - + physicalNetworkId + "to add this device"); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is in shutdown state in the physical network: %s to add this device", + ntwkSvcProvider.getProviderName(), physicalNetwork)); } final Map hostParams = new HashMap(); diff --git a/plugins/network-elements/ovs/src/main/java/com/cloud/network/element/OvsElement.java b/plugins/network-elements/ovs/src/main/java/com/cloud/network/element/OvsElement.java index 698919542642..b8f4e0c73ff2 100644 --- a/plugins/network-elements/ovs/src/main/java/com/cloud/network/element/OvsElement.java +++ b/plugins/network-elements/ovs/src/main/java/com/cloud/network/element/OvsElement.java @@ -114,22 +114,19 @@ public Provider getProvider() { } protected boolean canHandle(final Network network, final Service service) { - logger.debug("Checking if OvsElement can handle service " - + service.getName() + " on network " + network.getDisplayText()); + logger.debug(String.format("Checking if OvsElement can handle service %s on network %s", service.getName(), network)); if (network.getBroadcastDomainType() != BroadcastDomainType.Vswitch) { return false; } if (!_networkModel.isProviderForNetwork(getProvider(), network.getId())) { - logger.debug("OvsElement is not a provider for network " - + network.getDisplayText()); + logger.debug(String.format("OvsElement is not a provider for network %s", network)); return false; } if (!_ntwkSrvcDao.canProviderSupportServiceInNetwork(network.getId(), service, Network.Provider.Ovs)) { - logger.debug("OvsElement can't provide the " + service.getName() - + " service on network " + network.getDisplayText()); + 
logger.debug(String.format("OvsElement can't provide the %s service on network %s", service.getName(), network)); return false; } @@ -149,9 +146,7 @@ public boolean implement(final Network network, final NetworkOffering offering, final DeployDestination dest, final ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - logger.debug("entering OvsElement implement function for network " - + network.getDisplayText() + " (state " + network.getState() - + ")"); + logger.debug(String.format("entering OvsElement implement function for network %s (state %s)", network, network.getState())); if (!canHandle(network, Service.Connectivity)) { return false; @@ -437,9 +432,8 @@ public boolean applyIps(final Network network, final List routers = _routerDao.listByNetworkAndRole( network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug("Virtual router element doesn't need to associate ip addresses on the backend; virtual " - + "router doesn't exist in the network " - + network.getId()); + logger.debug(String.format("Virtual router element doesn't need to associate ip" + + " addresses on the backend; virtual router doesn't exist in the network %s", network)); return true; } @@ -462,8 +456,8 @@ public boolean applyStaticNats(final Network network, final List routers = _routerDao.listByNetworkAndRole( network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug("Ovs element doesn't need to apply static nat on the backend; virtual " - + "router doesn't exist in the network " + network.getId()); + logger.debug(String.format("Ovs element doesn't need to apply static nat on the " + + "backend; virtual router doesn't exist in the network %s", network)); return true; } @@ -485,8 +479,8 @@ public boolean applyPFRules(final Network network, final List routers = _routerDao.listByNetworkAndRole( network.getId(), Role.VIRTUAL_ROUTER); if (routers 
== null || routers.isEmpty()) { - logger.debug("Ovs element doesn't need to apply firewall rules on the backend; virtual " - + "router doesn't exist in the network " + network.getId()); + logger.debug(String.format("Ovs element doesn't need to apply firewall rules on the" + + " backend; virtual router doesn't exist in the network %s", network)); return true; } @@ -511,9 +505,8 @@ public boolean applyLBRules(final Network network, final List final List routers = _routerDao.listByNetworkAndRole( network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - logger.debug("Virtual router elemnt doesn't need to apply load balancing rules on the backend; virtual " - + "router doesn't exist in the network " - + network.getId()); + logger.debug(String.format("Virtual router element doesn't need to apply load " + + "balancing rules on the backend; virtual router doesn't exist in the network %s", network)); return true; } @@ -523,7 +516,7 @@ public boolean applyLBRules(final Network network, final List for (final DomainRouterVO domainRouterVO : routers) { result = result && networkTopology.applyLoadBalancingRules(network, rules, domainRouterVO); if (!result) { - logger.debug("Failed to apply load balancing rules in network " + network.getId()); + logger.debug(String.format("Failed to apply load balancing rules in network %s", network)); } } } @@ -603,18 +596,13 @@ public static boolean validateHAProxyLBRule(final LoadBalancingRule rule) { } if (expire != null && !containsOnlyNumbers(expire, timeEndChar)) { - throw new InvalidParameterValueException( - "Failed LB in validation rule id: " + rule.getId() - + " Cause: expire is not in timeformat: " - + expire); + throw new InvalidParameterValueException(String.format("Failed LB in validation rule id: %s Cause: expire is not in time format: %s", + rule.getUuid(), expire)); } if (tablesize != null && !containsOnlyNumbers(tablesize, "kmg")) { - throw new InvalidParameterValueException( - "Failed LB in 
validation rule id: " - + rule.getId() - + " Cause: tablesize is not in size format: " - + tablesize); + throw new InvalidParameterValueException(String.format("Failed LB in validation rule id: %s Cause: table size is not in size format: %s", + rule.getUuid(), tablesize)); } } else if (StickinessMethodType.AppCookieBased.getName() @@ -634,18 +622,14 @@ public static boolean validateHAProxyLBRule(final LoadBalancingRule rule) { } if (length != null && !containsOnlyNumbers(length, null)) { - throw new InvalidParameterValueException( - "Failed LB in validation rule id: " + rule.getId() - + " Cause: length is not a number: " - + length); + throw new InvalidParameterValueException(String.format("Failed LB in validation rule id: %s Cause: length is not a number: %s", + rule.getUuid(), length)); } if (holdTime != null && !containsOnlyNumbers(holdTime, timeEndChar) && !containsOnlyNumbers( holdTime, null)) { - throw new InvalidParameterValueException( - "Failed LB in validation rule id: " + rule.getId() - + " Cause: holdtime is not in timeformat: " - + holdTime); + throw new InvalidParameterValueException(String.format("Failed LB in validation rule id: %s Cause: holdtime is not in time format: %s", + rule.getUuid(), holdTime)); } } } diff --git a/plugins/network-elements/ovs/src/main/java/com/cloud/network/guru/OvsGuestNetworkGuru.java b/plugins/network-elements/ovs/src/main/java/com/cloud/network/guru/OvsGuestNetworkGuru.java index 97531a915377..0a9eeea496f8 100644 --- a/plugins/network-elements/ovs/src/main/java/com/cloud/network/guru/OvsGuestNetworkGuru.java +++ b/plugins/network-elements/ovs/src/main/java/com/cloud/network/guru/OvsGuestNetworkGuru.java @@ -187,13 +187,12 @@ public void shutdown(NetworkProfile profile, NetworkOffering offering) { NetworkVO networkObject = _networkDao.findById(profile.getId()); if (networkObject.getBroadcastDomainType() != BroadcastDomainType.Vswitch || networkObject.getBroadcastUri() == null) { - logger.warn("BroadcastUri is empty or 
incorrect for guestnetwork " - + networkObject.getDisplayText()); + logger.warn(String.format("BroadcastUri is empty or incorrect for guest network %s", networkObject)); return; } if (profile.getBroadcastDomainType() == BroadcastDomainType.Vswitch ) { - logger.debug("Releasing vnet for the network id=" + profile.getId()); + logger.debug(String.format("Releasing vnet for the network %s", profile)); _dcDao.releaseVnet(BroadcastDomainType.getValue(profile.getBroadcastUri()), profile.getDataCenterId(), profile.getPhysicalNetworkId(), profile.getAccountId(), profile.getReservationId()); } diff --git a/plugins/network-elements/ovs/src/main/java/com/cloud/network/ovs/OvsTunnelManagerImpl.java b/plugins/network-elements/ovs/src/main/java/com/cloud/network/ovs/OvsTunnelManagerImpl.java index c99a6fd5de3d..804f29c01b1e 100644 --- a/plugins/network-elements/ovs/src/main/java/com/cloud/network/ovs/OvsTunnelManagerImpl.java +++ b/plugins/network-elements/ovs/src/main/java/com/cloud/network/ovs/OvsTunnelManagerImpl.java @@ -265,9 +265,7 @@ private String getGreEndpointIP(Host host, Network nw) //for network with label on target host Commands fetchIfaceCmds = new Commands(new OvsFetchInterfaceCommand(physNetLabel)); - logger.debug("Ask host " + host.getId() + - " to retrieve interface for phy net with label:" + - physNetLabel); + logger.debug(String.format("Ask host %s to retrieve interface for phy net with label: %s", host, physNetLabel)); Answer[] fetchIfaceAnswers = _agentMgr.send(host.getId(), fetchIfaceCmds); //And finally save it for future use endpointIp = handleFetchInterfaceAnswer(fetchIfaceAnswers, host.getId()); @@ -318,7 +316,7 @@ protected void checkAndCreateTunnel(Network nw, Host host) { OvsTunnelNetworkVO ta = _tunnelNetworkDao.findByFromToNetwork(hostId, rh.longValue(), nw.getId()); // Try and create the tunnel even if a previous attempt failed if (ta == null || ta.getState().equals(OvsTunnel.State.Failed.name())) { - logger.debug("Attempting to create tunnel 
from:" + hostId + " to:" + rh.longValue()); + logger.debug(String.format("Attempting to create tunnel from: %s to: %d", host, rh)); if (ta == null) { createTunnelRecord(hostId, rh.longValue(), nw.getId(), key); } @@ -331,8 +329,7 @@ protected void checkAndCreateTunnel(Network nw, Host host) { hostId, nw.getId()); // Try and create the tunnel even if a previous attempt failed if (ta == null || ta.getState().equals(OvsTunnel.State.Failed.name())) { - logger.debug("Attempting to create tunnel from:" + - rh.longValue() + " to:" + hostId); + logger.debug(String.format("Attempting to create tunnel from: %d to: %s", rh, host)); if (ta == null) { createTunnelRecord(rh.longValue(), hostId, nw.getId(), key); @@ -346,22 +343,19 @@ protected void checkAndCreateTunnel(Network nw, Host host) { try { String myIp = getGreEndpointIP(host, nw); if (myIp == null) - throw new GreTunnelException("Unable to retrieve the source " + "endpoint for the GRE tunnel." + "Failure is on host:" + host.getId()); + throw new GreTunnelException(String.format("Unable to retrieve the source endpoint for the GRE tunnel. Failure is on host: %s", host)); boolean noHost = true; for (Long i : toHostIds) { HostVO rHost = _hostDao.findById(i); String otherIp = getGreEndpointIP(rHost, nw); if (otherIp == null) throw new GreTunnelException( - "Unable to retrieve the remote " - + "endpoint for the GRE tunnel." - + "Failure is on host:" + rHost.getId()); + String.format("Unable to retrieve the remote endpoint for the GRE tunnel. 
Failure is on host: %s", rHost)); Commands cmds = new Commands( new OvsCreateTunnelCommand(otherIp, key, Long.valueOf(hostId), i, nw.getId(), myIp, bridgeName, nw.getUuid())); - logger.debug("Attempting to create tunnel from:" + hostId + " to:" + i + " for the network " + nw.getId()); - logger.debug("Ask host " + hostId - + " to create gre tunnel to " + i); + logger.debug(String.format("Attempting to create tunnel from %s to %s for the network %s", host, rHost, nw)); + logger.debug(String.format("Ask host %s to create gre tunnel to %s", host, rHost)); Answer[] answers = _agentMgr.send(hostId, cmds); handleCreateTunnelAnswer(answers); noHost = false; @@ -372,8 +366,7 @@ protected void checkAndCreateTunnel(Network nw, Host host) { String otherIp = getGreEndpointIP(rHost, nw); Commands cmds = new Commands(new OvsCreateTunnelCommand(myIp, key, i, Long.valueOf(hostId), nw.getId(), otherIp, bridgeName, nw.getUuid())); - logger.debug("Ask host " + i + " to create gre tunnel to " - + hostId); + logger.debug(String.format("Ask host %s to create gre tunnel to %s", rHost, host)); Answer[] answers = _agentMgr.send(i, cmds); handleCreateTunnelAnswer(answers); noHost = false; @@ -383,7 +376,7 @@ protected void checkAndCreateTunnel(Network nw, Host host) { // anyway. 
This will ensure VIF rules will be triggered if (noHost) { Commands cmds = new Commands(new OvsSetupBridgeCommand(bridgeName, hostId, nw.getId())); - logger.debug("Ask host " + hostId + " to configure bridge for network:" + nw.getId()); + logger.debug(String.format("Ask host %s to configure bridge for network:%s", host, nw)); Answer[] answers = _agentMgr.send(hostId, cmds); handleSetupBridgeAnswer(answers); } @@ -451,7 +444,7 @@ private void handleDestroyBridgeAnswer(Answer ans, long hostId, long networkId) _tunnelNetworkDao.releaseFromLockTable(lock.getId()); logger.debug(String.format("Destroy bridge for" + - "network %1$s successful", networkId)); + "network %1$s successful", lock)); } else { logger.debug(String.format("Destroy bridge for" + "network %1$s failed", networkId)); @@ -487,8 +480,7 @@ public void checkAndRemoveHostFromTunnelNetwork(Network nw, Host host) { if (p.getState().equals(OvsTunnel.State.Established.name())) { Command cmd= new OvsDestroyTunnelCommand(p.getNetworkId(), bridgeName, p.getPortName()); - logger.debug("Destroying tunnel to " + host.getId() + - " from " + p.getFrom()); + logger.debug(String.format("Destroying tunnel to %s from %d", host, p.getFrom())); Answer ans = _agentMgr.send(p.getFrom(), cmd); handleDestroyTunnelAnswer(ans, p.getFrom(), p.getTo(), p.getNetworkId()); } @@ -497,7 +489,7 @@ public void checkAndRemoveHostFromTunnelNetwork(Network nw, Host host) { Command cmd = new OvsDestroyBridgeCommand(nw.getId(), generateBridgeNameForVpc(nw.getVpcId()), host.getId()); - logger.debug("Destroying bridge for network " + nw.getId() + " on host:" + host.getId()); + logger.debug(String.format("Destroying bridge for network %s on host: %s", nw, host)); Answer ans = _agentMgr.send(host.getId(), cmd); handleDestroyBridgeAnswer(ans, host.getId(), nw.getId()); } catch (Exception e) { @@ -515,7 +507,7 @@ public void checkAndRemoveHostFromTunnelNetwork(Network nw, Host host) { int key = getGreKey(nw); String bridgeName = 
generateBridgeName(nw, key); Command cmd = new OvsDestroyBridgeCommand(nw.getId(), bridgeName, host.getId()); - logger.debug("Destroying bridge for network " + nw.getId() + " on host:" + host.getId()); + logger.debug(String.format("Destroying bridge for network %s on host: %s", nw, host)); Answer ans = _agentMgr.send(host.getId(), cmd); handleDestroyBridgeAnswer(ans, host.getId(), nw.getId()); @@ -528,8 +520,7 @@ public void checkAndRemoveHostFromTunnelNetwork(Network nw, Host host) { if (p.getState().equals(OvsTunnel.State.Established.name())) { cmd = new OvsDestroyTunnelCommand(p.getNetworkId(), bridgeName, p.getPortName()); - logger.debug("Destroying tunnel to " + host.getId() + - " from " + p.getFrom()); + logger.debug(String.format("Destroying tunnel to %s from %d", host, p.getFrom())); ans = _agentMgr.send(p.getFrom(), cmd); handleDestroyTunnelAnswer(ans, p.getFrom(), p.getTo(), p.getNetworkId()); @@ -565,8 +556,7 @@ protected void checkAndCreateVpcTunnelNetworks(Host host, long vpcId) { // since this is the first VM from the VPC being launched on the host, first setup the bridge try { Commands cmds = new Commands(new OvsSetupBridgeCommand(bridgeName, hostId, null)); - logger.debug("Ask host " + hostId + " to create bridge for vpc " + vpcId + " and configure the " - + " bridge for distributed routing."); + logger.debug(String.format("Ask host %s to create bridge for vpc %d and configure the bridge for distributed routing.", host, vpcId)); Answer[] answers = _agentMgr.send(hostId, cmds); handleSetupBridgeAnswer(answers); } catch (OperationTimedoutException | AgentUnavailableException e) { @@ -578,8 +568,7 @@ protected void checkAndCreateVpcTunnelNetworks(Host host, long vpcId) { cmd.setSequenceNumber(getNextRoutingPolicyUpdateSequenceNumber(vpcId)); if (!sendVpcRoutingPolicyChangeUpdate(cmd, hostId, bridgeName)) { - logger.debug("Failed to send VPC routing policy change update to host : " + hostId + - ". 
But moving on with sending the updates to the rest of the hosts."); + logger.debug(String.format("Failed to send VPC routing policy change update to host: %s. But moving on with sending the updates to the rest of the hosts.", host)); } } @@ -602,7 +591,7 @@ protected void checkAndCreateVpcTunnelNetworks(Host host, long vpcId) { tunnelRecord = _tunnelNetworkDao.findByFromToNetwork(hostId, rh.longValue(), vpcNetwork.getId()); // Try and create the tunnel if does not exit or previous attempt failed if (tunnelRecord == null || tunnelRecord.getState().equals(OvsTunnel.State.Failed.name())) { - logger.debug("Attempting to create tunnel from:" + hostId + " to:" + rh.longValue()); + logger.debug(String.format("Attempting to create tunnel from: %s to: %d", host, rh)); if (tunnelRecord == null) { createTunnelRecord(hostId, rh.longValue(), vpcNetwork.getId(), key); } @@ -613,7 +602,7 @@ protected void checkAndCreateVpcTunnelNetworks(Host host, long vpcId) { tunnelRecord = _tunnelNetworkDao.findByFromToNetwork(rh.longValue(), hostId, vpcNetwork.getId()); // Try and create the tunnel if does not exit or previous attempt failed if (tunnelRecord == null || tunnelRecord.getState().equals(OvsTunnel.State.Failed.name())) { - logger.debug("Attempting to create tunnel from:" + rh.longValue() + " to:" + hostId); + logger.debug(String.format("Attempting to create tunnel from: %d to: %s", rh, host)); if (tunnelRecord == null) { createTunnelRecord(rh.longValue(), hostId, vpcNetwork.getId(), key); } @@ -626,23 +615,18 @@ protected void checkAndCreateVpcTunnelNetworks(Host host, long vpcId) { try { String myIp = getGreEndpointIP(host, vpcNetwork); if (myIp == null) - throw new GreTunnelException("Unable to retrieve the source " + "endpoint for the GRE tunnel." 
- + "Failure is on host:" + host.getId()); + throw new GreTunnelException(String.format("Unable to retrieve the source endpoint for the GRE tunnel. Failure is on host: %s", host)); boolean noHost = true; for (Long i : toHostIds) { HostVO rHost = _hostDao.findById(i); String otherIp = getGreEndpointIP(rHost, vpcNetwork); if (otherIp == null) - throw new GreTunnelException( - "Unable to retrieve the remote endpoint for the GRE tunnel." - + "Failure is on host:" + rHost.getId()); + throw new GreTunnelException(String.format("Unable to retrieve the remote endpoint for the GRE tunnel. Failure is on host: %s", rHost)); Commands cmds = new Commands( new OvsCreateTunnelCommand(otherIp, key, Long.valueOf(hostId), i, vpcNetwork.getId(), myIp, bridgeName, vpcNetwork.getUuid())); - logger.debug("Attempting to create tunnel from:" + hostId + " to:" + i + " for the network " - + vpcNetwork.getId()); - logger.debug("Ask host " + hostId - + " to create gre tunnel to " + i); + logger.debug(String.format("Attempting to create tunnel from: %s to: %s for the network %s", host, rHost, vpcNetwork)); + logger.debug(String.format("Ask host %s to create gre tunnel to %s", host, rHost)); Answer[] answers = _agentMgr.send(hostId, cmds); handleCreateTunnelAnswer(answers); } @@ -653,8 +637,7 @@ protected void checkAndCreateVpcTunnelNetworks(Host host, long vpcId) { Commands cmds = new Commands(new OvsCreateTunnelCommand(myIp, key, i, Long.valueOf(hostId), vpcNetwork.getId(), otherIp, bridgeName, vpcNetwork.getUuid())); - logger.debug("Ask host " + i + " to create gre tunnel to " - + hostId); + logger.debug(String.format("Ask host %s to create gre tunnel to %s", rHost, host)); Answer[] answers = _agentMgr.send(i, cmds); handleCreateTunnelAnswer(answers); } diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ConfigTungstenFabricServiceCmd.java 
b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ConfigTungstenFabricServiceCmd.java index 19bf0a339094..d4ee924858e7 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ConfigTungstenFabricServiceCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ConfigTungstenFabricServiceCmd.java @@ -150,7 +150,7 @@ private void persistNetworkServiceMapAvoidingDuplicates(Network network, if (networkServiceMapDao.canProviderSupportServiceInNetwork(network.getId(), service, provider)) { logger.debug(String.format("A mapping between the network, service and provider (%s, %s, %s) " + "already exists, skipping duplicated entry", - network.getId(), service.getName(), provider.getName())); + network, service.getName(), provider.getName())); return; } diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenElement.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenElement.java index 106cf5180c3d..1f4f98194dce 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenElement.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenElement.java @@ -271,11 +271,10 @@ private static Map> initCapabil } protected boolean canHandle(Network network, Network.Service service) { - logger.debug("Checking if TungstenElement can handle service " + service.getName() + " on network " - + network.getDisplayText()); + logger.debug(String.format("Checking if TungstenElement can handle service %s on network %s", service.getName(), network)); if (!networkModel.isProviderForNetwork(getProvider(), network.getId())) { - logger.debug("TungstenElement is not a provider for network " + network.getDisplayText()); 
+ logger.debug(String.format("TungstenElement is not a provider for network %s", network)); return false; } @@ -661,8 +660,7 @@ public boolean release(Network network, NicProfile nic, VirtualMachineProfile vm TungstenUtils.getPublicNetworkPolicyName(ipAddressVO.getId()), null, network.getUuid()); tungstenFabricUtils.sendTungstenCommand(deleteTungstenNetworkPolicyCommand, network.getDataCenterId()); } catch (IllegalArgumentException e) { - throw new CloudRuntimeException( - "Failing to expunge the vm from Tungsten-Fabric with the uuid " + vm.getUuid()); + throw new CloudRuntimeException(String.format("Failing to expunge the vm %s from Tungsten-Fabric", vm)); } } @@ -680,8 +678,7 @@ public boolean release(Network network, NicProfile nic, VirtualMachineProfile vm TungstenCommand deleteVmCmd = new DeleteTungstenVmCommand(vm.getUuid()); tungstenFabricUtils.sendTungstenCommand(deleteVmCmd, network.getDataCenterId()); } catch (IllegalArgumentException e) { - throw new CloudRuntimeException( - "Failing to expunge the vm from Tungsten-Fabric with the uuid " + vm.getUuid()); + throw new CloudRuntimeException(String.format("Failing to expunge the vm %s from Tungsten-Fabric", vm)); } } } diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenGuestNetworkGuru.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenGuestNetworkGuru.java index 4d22806a139c..38eb6c34da2e 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenGuestNetworkGuru.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenGuestNetworkGuru.java @@ -196,7 +196,7 @@ public void deallocate(Network config, NicProfile nic, VirtualMachineProfile vm) DeleteTungstenVmCommand cmd = new DeleteTungstenVmCommand(vm.getUuid()); tungstenFabricUtils.sendTungstenCommand(cmd, 
config.getDataCenterId()); } catch (IllegalArgumentException e) { - throw new CloudRuntimeException("Failing to expunge the vm from Tungsten-Fabric with the uuid " + vm.getUuid()); + throw new CloudRuntimeException(String.format("Failing to expunge the vm %s from Tungsten-Fabric", vm)); } } } @@ -290,7 +290,7 @@ public Network implement(Network network, NetworkOffering offering, DeployDestin } } } catch (Exception ex) { - throw new CloudRuntimeException("unable to create Tungsten-Fabric network " + network.getUuid()); + throw new CloudRuntimeException(String.format("unable to create Tungsten-Fabric network %s", network)); } return implemented; } diff --git a/plugins/network-elements/vxlan/src/main/java/com/cloud/network/guru/VxlanGuestNetworkGuru.java b/plugins/network-elements/vxlan/src/main/java/com/cloud/network/guru/VxlanGuestNetworkGuru.java index fc92775c6972..ec654af2b32f 100644 --- a/plugins/network-elements/vxlan/src/main/java/com/cloud/network/guru/VxlanGuestNetworkGuru.java +++ b/plugins/network-elements/vxlan/src/main/java/com/cloud/network/guru/VxlanGuestNetworkGuru.java @@ -149,7 +149,7 @@ public boolean release(NicProfile nic, VirtualMachineProfile vm, String reservat public void shutdown(NetworkProfile profile, NetworkOffering offering) { NetworkVO networkObject = _networkDao.findById(profile.getId()); if (networkObject.getBroadcastDomainType() != BroadcastDomainType.Vxlan || networkObject.getBroadcastUri() == null) { - logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); + logger.warn(String.format("BroadcastUri is empty or incorrect for guest network %s", networkObject)); return; } diff --git a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/driver/ElastistorPrimaryDataStoreDriver.java b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/driver/ElastistorPrimaryDataStoreDriver.java index 60359dd2c266..3d4afcaf95c5 100644 --- 
a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/driver/ElastistorPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/driver/ElastistorPrimaryDataStoreDriver.java @@ -133,7 +133,7 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet capacityIops = capacityIops - Iops; if (capacityIops < 0) { - throw new CloudRuntimeException("IOPS not available. [pool:" + storagePool.getName() + "] [availiops:" + capacityIops + "] [requirediops:" + Iops + "]"); + throw new CloudRuntimeException(String.format("IOPS not available. [pool:%s] [availiops:%d] [requirediops:%d]", storagePool, capacityIops, Iops)); } String protocoltype = null; diff --git a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java index 38b8b0ecb7a8..63aee8c2dc3f 100644 --- a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java @@ -366,7 +366,7 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { if (!dataStoreVO.isManaged()) { boolean success = false; for (HostVO h : allHosts) { - success = createStoragePool(h.getId(), primarystore); + success = createStoragePool(h, primarystore); if (success) { break; } @@ -393,8 +393,8 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { return true; } - private boolean createStoragePool(long hostId, StoragePool pool) { - logger.debug("creating pool " + pool.getName() + " on host " + hostId); + private boolean createStoragePool(Host host, StoragePool pool) 
{ + logger.debug(String.format("creating pool %s on host %s", pool, host)); if (pool.getPoolType() != StoragePoolType.NetworkFilesystem && pool.getPoolType() != StoragePoolType.Filesystem && pool.getPoolType() != StoragePoolType.IscsiLUN && pool.getPoolType() != StoragePoolType.Iscsi && pool.getPoolType() != StoragePoolType.VMFS && pool.getPoolType() != StoragePoolType.SharedMountPoint && pool.getPoolType() != StoragePoolType.PreSetup && pool.getPoolType() != StoragePoolType.OCFS2 && pool.getPoolType() != StoragePoolType.RBD @@ -403,17 +403,17 @@ private boolean createStoragePool(long hostId, StoragePool pool) { return false; } CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool); - final Answer answer = agentMgr.easySend(hostId, cmd); + final Answer answer = agentMgr.easySend(host.getId(), cmd); if (answer != null && answer.getResult()) { return true; } else { primaryDataStoreDao.expunge(pool.getId()); String msg = ""; if (answer != null) { - msg = "Can not create storage pool through host " + hostId + " due to " + answer.getDetails(); + msg = String.format("Can not create storage pool through host %s due to %s", host, answer.getDetails()); logger.warn(msg); } else { - msg = "Can not create storage pool through host " + hostId + " due to CreateStoragePoolCommand returns null"; + msg = String.format("Can not create storage pool through host %s due to CreateStoragePoolCommand returns null", host); logger.warn(msg); } throw new CloudRuntimeException(msg); diff --git a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/provider/ElastistorHostListener.java b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/provider/ElastistorHostListener.java index d2307111a816..3946acc35d9c 100644 --- a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/provider/ElastistorHostListener.java +++ 
b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/provider/ElastistorHostListener.java @@ -105,20 +105,20 @@ public boolean hostConnect(long hostId, long poolId) { final Answer answer = agentMgr.easySend(hostId, cmd); if (answer == null) { - throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command" + pool.getId()); + throw new CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command for pool %s", poolVO)); } if (!answer.getResult()) { - String msg = "Unable to attach storage pool" + poolId + " to the host" + hostId; + String msg = String.format("Unable to attach storage pool %s to the host %s", poolVO, host); alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST,pool.getDataCenterId(), pool.getPodId(), msg, msg); - throw new CloudRuntimeException("Unable establish connection from storage head to storage pool " + pool.getId() + " due to " + answer.getDetails() + pool.getId()); + throw new CloudRuntimeException(String.format("Unable to establish connection from storage head to storage pool %s due to %s", poolVO, answer.getDetails())); } - assert (answer instanceof ModifyStoragePoolAnswer) : "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? Pool=" + pool.getId() + "Host=" + hostId; + assert (answer instanceof ModifyStoragePoolAnswer) : String.format("Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? 
Pool=%sHost=%s", poolVO, host); - logger.info("Connection established between " + pool + " host + " + hostId); + logger.info(String.format("Connection established between pool %s and host %s", pool, host)); return true; } diff --git a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java index 6423b07a909a..dcf84525748f 100644 --- a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java @@ -388,7 +388,7 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) } } catch (DateraObject.DateraError | UnsupportedEncodingException | InterruptedException dateraError) { - String errMesg = "Error revoking access for Volume : " + dataObject.getId(); + String errMesg = String.format("Error revoking access for Volume : %s", dataObject); logger.warn(errMesg, dateraError); throw new CloudRuntimeException(errMesg); } finally { @@ -588,7 +588,7 @@ private long getUsedBytes(StoragePool storagePool, long volumeIdToIgnore) { usedSpaceBytes += DateraUtil.gibToBytes(appInstance.getSize()); } } catch (DateraObject.DateraError dateraError) { - String errMesg = "Error getting used bytes for storage pool : " + storagePool.getId(); + String errMesg = String.format("Error getting used bytes for storage pool : %s", storagePool); logger.warn(errMesg, dateraError); throw new CloudRuntimeException(errMesg); } @@ -723,7 +723,7 @@ private void deleteVolume(VolumeInfo volumeInfo, long storagePoolId) { storagePoolDao.update(storagePoolId, storagePool); } catch (UnsupportedEncodingException | DateraObject.DateraError e) { - String errMesg = "Error deleting app instance for Volume : " 
+ volumeInfo.getId(); + String errMesg = String.format("Error deleting app instance for Volume: %s", volumeInfo); logger.warn(errMesg, e); throw new CloudRuntimeException(errMesg); } @@ -826,8 +826,7 @@ private String createVolume(VolumeInfo volumeInfo, long storagePoolId) { String iqnPath = DateraUtil.generateIqnPath(iqn); VolumeVO volumeVo = _volumeDao.findById(volumeInfo.getId()); - logger.debug("volume ID : " + volumeInfo.getId()); - logger.debug("volume uuid : " + volumeInfo.getUuid()); + logger.debug(String.format("volume: %s", volumeInfo)); volumeVo.set_iScsiName(iqnPath); volumeVo.setFolder(appInstance.getName()); @@ -970,7 +969,7 @@ private DateraObject.AppInstance createDateraClone(DateraObject.DateraConnection if (baseAppInstanceName == null) { throw new CloudRuntimeException( - "Unable to find a base volume to clone " + volumeInfo.getId() + " type " + dataType); + "Unable to find a base volume to clone " + volumeInfo.getUuid() + " type " + dataType); } // Clone the app Instance @@ -995,7 +994,7 @@ private DateraObject.AppInstance createDateraClone(DateraObject.DateraConnection } if (appInstance == null) { throw new CloudRuntimeException("Unable to create an app instance from snapshot or template " - + volumeInfo.getId() + " type " + dataType); + + volumeInfo.getUuid() + " type " + dataType); } logger.debug("Datera - Cloned " + baseAppInstanceName + " to " + clonedAppInstanceName); @@ -1114,7 +1113,7 @@ public String createTemplateVolume(TemplateInfo templateInfo, long storagePoolId templateIops, replicaCount, volumePlacement, ipPool); if (appInstance == null) { - throw new CloudRuntimeException("Unable to create Template volume " + templateInfo.getId()); + throw new CloudRuntimeException(String.format("Unable to create Template volume %s", templateInfo.getUuid())); } iqn = appInstance.getIqn(); @@ -1306,9 +1305,8 @@ public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback allHosts = _resourceMgr.listAllUpHosts(Host.Type.Routing, 
clusterId, podId, zoneId); if (allHosts.isEmpty()) { - throw new CloudRuntimeException("No host up to associate a storage pool with in zone: " + zoneId + " pod: " + podId + " cluster: " + clusterId); + throw new CloudRuntimeException(String.format("No host up to associate a storage pool with in zone: %s pod: %s cluster: %s", + zoneDao.findById(zoneId), podDao.findById(podId), clusterDao.findById(clusterId))); } boolean success = false; @@ -342,7 +343,8 @@ private void validateVcenterDetails(Long zoneId, Long podId, Long clusterId, Str return; } else { if (answer != null) { - throw new InvalidParameterValueException(String.format("Provided vCenter server details does not match with the existing vCenter in zone: %s", zoneDao.findById(zoneId))); + throw new InvalidParameterValueException(String.format("Provided vCenter server details does not match with the existing vCenter in zone: %s", + zoneDao.findById(zoneId))); } else { logger.warn("Can not validate vCenter through host {} due to ValidateVcenterDetailsCommand returns null", h); } @@ -447,7 +449,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType h poolHosts.add(host); } catch (StorageConflictException se) { primaryDataStoreDao.expunge(dataStore.getId()); - throw new CloudRuntimeException("Storage has already been added as local storage to host: " + host.getName()); + throw new CloudRuntimeException(String.format("Storage has already been added as local storage to host: %s", host)); } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); String reason = storageMgr.getStoragePoolMountFailureReason(e.getMessage()); diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java index 42f7f82b24f3..8abd8fd8bd22 100644 
--- a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java +++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java @@ -382,7 +382,7 @@ private void applyQoSSettings(StoragePoolVO storagePool, DevelopersApi api, Stri long vMaxIops = maxIops != null ? maxIops : 0; long newIops = vcIops + vMaxIops; capacityIops -= newIops; - logger.info("Current storagepool " + storagePool.getName() + " iops capacity: " + capacityIops); + logger.info(String.format("Current storagepool %s iops capacity: %d", storagePool, capacityIops)); storagePool.setCapacityIops(Math.max(0, capacityIops)); _storagePoolDao.update(storagePool.getId(), storagePool); } @@ -1198,7 +1198,7 @@ private Answer createAnswerAndPerstistDetails(DevelopersApi api, SnapshotInfo sn @Override public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback callback) { - logger.debug("Linstor: takeSnapshot with snapshot: " + snapshotInfo.getUuid()); + logger.debug(String.format("Linstor: takeSnapshot with snapshot: %s", snapshotInfo.getSnapshotVO())); final VolumeInfo volumeInfo = snapshotInfo.getBaseVolume(); final VolumeVO volumeVO = _volumeDao.findById(volumeInfo.getId()); diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/LinstorPrimaryDataStoreLifeCycleImpl.java b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/LinstorPrimaryDataStoreLifeCycleImpl.java index d74d04dbb429..b45953989b5a 100644 --- a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/LinstorPrimaryDataStoreLifeCycleImpl.java +++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/LinstorPrimaryDataStoreLifeCycleImpl.java @@ -173,22 +173,22 @@ public DataStore initialize(Map dsInfos) { return 
dataStoreHelper.createPrimaryDataStore(parameters); } - protected boolean createStoragePool(long hostId, StoragePool pool) { - logger.debug("creating pool " + pool.getName() + " on host " + hostId); + protected boolean createStoragePool(Host host, StoragePool pool) { + logger.debug(String.format("creating pool %s on host %s", pool, host)); if (pool.getPoolType() != Storage.StoragePoolType.Linstor) { logger.warn(" Doesn't support storage pool type " + pool.getPoolType()); return false; } CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool); - final Answer answer = _agentMgr.easySend(hostId, cmd); + final Answer answer = _agentMgr.easySend(host.getId(), cmd); if (answer != null && answer.getResult()) { return true; } else { _primaryDataStoreDao.expunge(pool.getId()); String msg = answer != null ? - "Can not create storage pool through host " + hostId + " due to " + answer.getDetails() : - "Can not create storage pool through host " + hostId + " due to CreateStoragePoolCommand returns null"; + String.format("Can not create storage pool %s through host %s due to %s", pool, host, answer.getDetails()) : + String.format("Can not create storage pool %s through host %s due to CreateStoragePoolCommand returns null", pool, host); logger.warn(msg); throw new CloudRuntimeException(msg); } @@ -219,7 +219,7 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { List poolHosts = new ArrayList<>(); for (HostVO host : allHosts) { try { - createStoragePool(host.getId(), primaryDataStoreInfo); + createStoragePool(host, primaryDataStoreInfo); _storageMgr.connectHostToSharedPool(host, primaryDataStoreInfo.getId()); diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java index 04f9045f570d..ba23566c3fdd 100644 --- 
a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java @@ -215,7 +215,8 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) } if (isRevokeAccessNotNeeded(dataObject)) { - logger.debug("Skipping revoke access for Solidfire data object type:" + dataObject.getType() + " id:" + dataObject.getId()); + logger.debug("Skipping revoke access for Solidfire data object type: {} id: {} uuid: {}", + dataObject.getType(), dataObject.getId(), dataObject.getUuid()); return; } @@ -235,7 +236,8 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) throw new CloudRuntimeException(errMsg); } - logger.debug("Revoking access for Solidfire data object type:" + dataObject.getType() + " id:" + dataObject.getId()); + logger.debug("Revoking access for Solidfire data object type: {} id: {} uuid: {}", + dataObject.getType(), dataObject.getId(), dataObject.getUuid()); try { SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao); @@ -951,7 +953,7 @@ public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback availableBytes) { - throw new CloudRuntimeException("Storage pool " + storagePoolId + " does not have enough space to expand the volume."); + throw new CloudRuntimeException(String.format("Storage pool %s does not have enough space to expand the volume.", storagePool)); } } diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java index f79db2d67829..482fa23096a8 100644 --- 
a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java @@ -389,7 +389,7 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { if (allHosts.isEmpty()) { primaryDataStoreDao.expunge(primaryDataStoreInfo.getId()); - throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primaryDataStoreInfo.getClusterId()); + throw new CloudRuntimeException(String.format("No host up to associate a storage pool with in cluster %s", clusterDao.findById(primaryDataStoreInfo.getClusterId()))); } boolean success = false; @@ -403,7 +403,7 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { } if (!success) { - throw new CloudRuntimeException("Unable to create storage in cluster " + primaryDataStoreInfo.getClusterId()); + throw new CloudRuntimeException("Unable to create storage in cluster " + clusterDao.findById(primaryDataStoreInfo.getClusterId())); } List poolHosts = new ArrayList<>(); @@ -419,7 +419,7 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { } if (poolHosts.isEmpty()) { - logger.warn("No host can access storage pool '" + primaryDataStoreInfo + "' on cluster '" + primaryDataStoreInfo.getClusterId() + "'."); + logger.warn("No host can access storage pool '{}' on cluster '{}'.", primaryDataStoreInfo, clusterDao.findById(primaryDataStoreInfo.getClusterId())); primaryDataStoreDao.expunge(primaryDataStoreInfo.getId()); @@ -470,9 +470,9 @@ private boolean createStoragePool(HostVO host, StoragePool storagePool) { final String msg; if (answer != null) { - msg = "Cannot create storage pool through host '" + hostId + "' due to the following: " + answer.getDetails(); + msg = String.format("Cannot create storage pool through host '%s' due to the following: %s", 
host, answer.getDetails()); } else { - msg = "Cannot create storage pool through host '" + hostId + "' due to CreateStoragePoolCommand returns null"; + msg = String.format("Cannot create storage pool through host '%s' due to CreateStoragePoolCommand returns null", host); } logger.warn(msg); @@ -558,23 +558,21 @@ public boolean deleteDataStore(DataStore dataStore) { final Answer answer = agentMgr.easySend(host.getHostId(), deleteCmd); if (answer != null && answer.getResult()) { - logger.info("Successfully deleted storage pool using Host ID " + host.getHostId()); - HostVO hostVO = hostDao.findById(host.getHostId()); - if (hostVO != null) { clusterId = hostVO.getClusterId(); hostId = hostVO.getId(); } - + logger.info("Successfully deleted storage pool using Host {} with ID {}", hostVO, host.getHostId()); break; } else { + HostVO hostVO = hostDao.findById(host.getHostId()); if (answer != null) { - logger.error("Failed to delete storage pool using Host ID " + host.getHostId() + ": " + answer.getResult()); + logger.error("Failed to delete storage pool using Host {} with ID: {}: {}", hostVO, host.getHostId(), answer.getResult()); } else { - logger.error("Failed to delete storage pool using Host ID " + host.getHostId()); + logger.error("Failed to delete storage pool using Host {} with ID: {}", hostVO, host.getHostId()); } } } @@ -646,12 +644,12 @@ private void handleTargetsForVMware(long hostId, long storagePoolId) { cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC); cmd.setRemoveAsync(true); - sendModifyTargetsCommand(cmd, hostId); + sendModifyTargetsCommand(cmd, host); } } - private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { - Answer answer = agentMgr.easySend(hostId, cmd); + private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, Host host) { + Answer answer = agentMgr.easySend(host.getId(), cmd); if (answer == null) { String msg = "Unable to get an answer to the modify targets command"; @@ -659,7 +657,7 @@ 
private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { logger.warn(msg); } else if (!answer.getResult()) { - String msg = "Unable to modify target on the following host: " + hostId; + String msg = String.format("Unable to modify target on the following host: %s", host); logger.warn(msg); } diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java index d84734283939..88bb487861df 100644 --- a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java @@ -25,6 +25,7 @@ import javax.inject.Inject; +import com.cloud.host.Host; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -105,10 +106,10 @@ public boolean hostConnect(long hostId, long storagePoolId) { } if (host.getHypervisorType().equals(HypervisorType.XenServer)) { - handleXenServer(host.getClusterId(), host.getId(), storagePoolId); + handleXenServer(host.getClusterId(), host, storagePoolId); } else if (host.getHypervisorType().equals(HypervisorType.KVM)) { - handleKVM(hostId, storagePoolId); + handleKVM(host, storagePoolId); } return true; @@ -147,7 +148,7 @@ public boolean hostEnabled(long hostId) { return true; } - private void handleXenServer(long clusterId, long hostId, long storagePoolId) { + private void handleXenServer(long clusterId, Host host, long storagePoolId) { List storagePaths = getStoragePaths(clusterId, storagePoolId); StoragePool storagePool = (StoragePool)dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); @@ -157,7 +158,7 @@ private void handleXenServer(long clusterId, long hostId, long storagePoolId) { 
cmd.setStoragePath(storagePath); - sendModifyStoragePoolCommand(cmd, storagePool, hostId); + sendModifyStoragePoolCommand(cmd, storagePool, host); } } @@ -181,17 +182,17 @@ private void handleVMware(HostVO host, boolean add, ModifyTargetsCommand.TargetT cmd.setTargetTypeToRemove(targetTypeToRemove); cmd.setRemoveAsync(true); - sendModifyTargetsCommand(cmd, host.getId()); + sendModifyTargetsCommand(cmd, host); } } } - private void handleKVM(long hostId, long storagePoolId) { + private void handleKVM(Host host, long storagePoolId) { StoragePool storagePool = (StoragePool)dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool); - sendModifyStoragePoolCommand(cmd, storagePool, hostId); + sendModifyStoragePoolCommand(cmd, storagePool, host); } private List getStoragePaths(long clusterId, long storagePoolId) { @@ -260,17 +261,15 @@ private List> getTargets(long clusterId, long storagePoolId) return targets; } - private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { - Answer answer = agentMgr.easySend(hostId, cmd); + private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, Host host) { + Answer answer = agentMgr.easySend(host.getId(), cmd); if (answer == null) { throw new CloudRuntimeException("Unable to get an answer to the modify targets command"); } if (!answer.getResult()) { - String msg = "Unable to modify targets on the following host: " + hostId; - - HostVO host = hostDao.findById(hostId); + String msg = String.format("Unable to modify targets on the following host: %s", host); alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), msg, msg); @@ -278,24 +277,23 @@ private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { } } - private void sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StoragePool storagePool, long hostId) { - Answer answer = agentMgr.easySend(hostId, 
cmd); + private void sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StoragePool storagePool, Host host) { + Answer answer = agentMgr.easySend(host.getId(), cmd); if (answer == null) { - throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command (" + storagePool.getId() + ")"); + throw new CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command (%s)", storagePool)); } if (!answer.getResult()) { - String msg = "Unable to attach storage pool " + storagePool.getId() + " to host " + hostId; + String msg = String.format("Unable to attach storage pool %s to host %s", storagePool, host); alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg); - throw new CloudRuntimeException("Unable to establish a connection from agent to storage pool " + storagePool.getId() + " due to " + answer.getDetails() + - " (" + storagePool.getId() + ")"); + throw new CloudRuntimeException(String.format("Unable to establish a connection from agent to storage pool %s due to %s", storagePool, answer.getDetails())); } - assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer expected ; Pool = " + storagePool.getId() + " Host = " + hostId; + assert (answer instanceof ModifyStoragePoolAnswer) : String.format("ModifyStoragePoolAnswer expected ; Pool = %s Host = %s", storagePool, host); - logger.info("Connection established between storage pool " + storagePool + " and host + " + hostId); + logger.info("Connection established between storage pool {} and host + {}", storagePool, host); } } diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireSharedHostListener.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireSharedHostListener.java index 98c8bfb51c19..f746390085b8 100644 --- 
a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireSharedHostListener.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireSharedHostListener.java @@ -25,6 +25,7 @@ import javax.inject.Inject; +import com.cloud.host.Host; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -187,23 +188,21 @@ private void handleVMware(HostVO host, boolean add, ModifyTargetsCommand.TargetT cmd.setTargetTypeToRemove(targetTypeToRemove); cmd.setRemoveAsync(true); - sendModifyTargetsCommand(cmd, host.getId()); + sendModifyTargetsCommand(cmd, host); } } } } - private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { - Answer answer = agentMgr.easySend(hostId, cmd); + private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, Host host) { + Answer answer = agentMgr.easySend(host.getId(), cmd); if (answer == null) { throw new CloudRuntimeException("Unable to get an answer to the modify targets command"); } if (!answer.getResult()) { - String msg = "Unable to modify targets on the following host: " + hostId; - - HostVO host = hostDao.findById(hostId); + String msg = String.format("Unable to modify targets on the following host: %s", host); alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), msg, msg); @@ -215,21 +214,22 @@ private ModifyStoragePoolAnswer sendModifyStoragePoolCommand(ModifyStoragePoolCo Answer answer = agentMgr.easySend(hostId, cmd); if (answer == null) { - throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command for storage pool: " + storagePool.getId()); + throw new CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command for storage 
pool: %s", storagePool)); } + HostVO host = hostDao.findById(hostId); if (!answer.getResult()) { - String msg = "Unable to attach storage pool " + storagePool.getId() + " to the host " + hostId; + String msg = String.format("Unable to attach storage pool %s to the host %s", storagePool, host); alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg); throw new CloudRuntimeException(msg); } - assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer not returned from ModifyStoragePoolCommand; Storage pool = " + - storagePool.getId() + "; Host = " + hostId; + assert (answer instanceof ModifyStoragePoolAnswer) : + String.format("ModifyStoragePoolAnswer not returned from ModifyStoragePoolCommand; Storage pool = %s; Host = %s", storagePool, host); - logger.info("Connection established between storage pool " + storagePool + " and host " + hostId); + logger.info("Connection established between storage pool {} and host {}", storagePool, host); return (ModifyStoragePoolAnswer)answer; } diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java index 671431f41635..68f0ff4bf375 100644 --- a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java @@ -540,7 +540,7 @@ private static void handleVagForHost(SolidFireUtil.SolidFireConnection sfConnect if (sfVag.getInitiators().length < MAX_NUM_INITIATORS_PER_VAG) { if (!hostSupports_iScsi(host)) { - String errMsg = "Host with ID " + host.getId() + " does not support iSCSI."; + String errMsg = String.format("Host %s does not support iSCSI.", host); LOGGER.warn(errMsg); @@ -562,7 +562,7 @@ private static 
void handleVagForHost(SolidFireUtil.SolidFireConnection sfConnect if (numVags > 0) { if (!hostSupports_iScsi(host)) { - String errMsg = "Host with ID " + host.getId() + " does not support iSCSI."; + String errMsg = String.format("Host %s does not support iSCSI.", host); LOGGER.warn(errMsg); diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupSnapshotCommandWrapper.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupSnapshotCommandWrapper.java index b3f49b015d1f..00746334e022 100644 --- a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupSnapshotCommandWrapper.java +++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupSnapshotCommandWrapper.java @@ -92,7 +92,8 @@ public CopyCmdAnswer execute(final StorPoolBackupSnapshotCommand cmd, final Libv return new CopyCmdAnswer(snapshot); } catch (final Exception e) { - final String error = String.format("Failed to backup snapshot with id [%s] with a pool %s, due to %s", cmd.getSourceTO().getId(), cmd.getSourceTO().getDataStore().getUuid(), e.getMessage()); + final String error = String.format("Failed to backup snapshot [id: %s, name: %s] with a pool with id %s, due to %s", + cmd.getSourceTO().getId(), cmd.getSourceTO().getName(), cmd.getSourceTO().getDataStore().getUuid(), e.getMessage()); SP_LOG(error); logger.debug(error); return new CopyCmdAnswer(cmd, e); diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolHostListener.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolHostListener.java index b696990c5336..3111b04e0c71 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolHostListener.java +++ 
b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolHostListener.java @@ -114,7 +114,7 @@ public boolean hostConnect(long hostId, long poolId) throws StorageConflictExcep if (!isCurrentVersionSupportsEverythingFromPrevious) { String msg = "The current StorPool driver does not support all functionality from the one before upgrade to CS"; StorPoolUtil.spLog("Storage pool [%s] is not connected to host [%s] because the functionality after the upgrade is not full", - poolId, hostId); + pool, host); alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, pool.getDataCenterId(), pool.getPodId(), msg, msg); return false; } @@ -126,24 +126,23 @@ public boolean hostConnect(long hostId, long poolId) throws StorageConflictExcep boolean isPoolConnectedToTheHost = poolHost != null; if (answer == null) { - StorPoolUtil.spLog("Storage pool [%s] is not connected to the host [%s]", poolVO.getName(), host.getName()); + StorPoolUtil.spLog("Storage pool [%s] is not connected to the host [%s]", poolVO, host); deleteVolumeWhenHostCannotConnectPool(conn, volumeOnPool); removePoolOnHost(poolHost, isPoolConnectedToTheHost); - throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command" + pool.getId()); + throw new CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command%s", pool)); } if (!answer.getResult()) { - StorPoolUtil.spLog("Storage pool [%s] is not connected to the host [%s]", poolVO.getName(), host.getName()); + StorPoolUtil.spLog("Storage pool [%s] is not connected to the host [%s]", poolVO, host); removePoolOnHost(poolHost, isPoolConnectedToTheHost); if (answer.getDetails() != null && isStorPoolVolumeOrStorageNotExistsOnHost(answer)) { deleteVolumeWhenHostCannotConnectPool(conn, volumeOnPool); return false; } - String msg = "Unable to attach storage pool" + poolId + " to the host" + hostId; + String msg = String.format("Unable to attach storage pool %s to 
the host %s", pool, host); alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, pool.getDataCenterId(), pool.getPodId(), msg, msg); - throw new CloudRuntimeException("Unable establish connection from storage head to storage pool " + pool.getId() + " due to " + answer.getDetails() + - pool.getId()); + throw new CloudRuntimeException(String.format("Unable establish connection from storage head to storage pool %s due to %s", pool, answer.getDetails())); } StorPoolModifyStoragePoolAnswer mspAnswer = (StorPoolModifyStoragePoolAnswer)answer; @@ -152,9 +151,8 @@ public boolean hostConnect(long hostId, long poolId) throws StorageConflictExcep List localStoragePools = primaryStoreDao.listLocalStoragePoolByPath(pool.getDataCenterId(), datastoreName); for (StoragePoolVO localStoragePool : localStoragePools) { if (datastoreName.equals(localStoragePool.getPath())) { - logger.warn("Storage pool: " + pool.getId() + " has already been added as local storage: " + localStoragePool.getName()); - throw new StorageConflictException("Cannot add shared storage pool: " + pool.getId() + " because it has already been added as local storage:" - + localStoragePool.getName()); + logger.warn("Storage pool: {} has already been added as local storage: {}", pool, localStoragePool); + throw new StorageConflictException(String.format("Cannot add shared storage pool: %s because it has already been added as local storage: %s", pool, localStoragePool)); } } } @@ -173,7 +171,7 @@ public boolean hostConnect(long hostId, long poolId) throws StorageConflictExcep StorPoolHelper.setSpClusterIdIfNeeded(hostId, mspAnswer.getClusterId(), clusterDao, hostDao, clusterDetailsDao); - StorPoolUtil.spLog("Connection established between storage pool [%s] and host [%s]", poolVO.getName(), host.getName()); + StorPoolUtil.spLog("Connection established between storage pool [%s] and host [%s]", poolVO, host); return true; } diff --git 
a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/motion/StorPoolDataMotionStrategy.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/motion/StorPoolDataMotionStrategy.java index 41e9676bb11f..e7ea0900112d 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/motion/StorPoolDataMotionStrategy.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/motion/StorPoolDataMotionStrategy.java @@ -193,7 +193,7 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, CopyCmdAnswer answer = null; String err = null; if (res.getError() != null) { - logger.debug(String.format("Could not create volume from snapshot with ID=%s", snapshot.getId())); + logger.debug("Could not create volume from snapshot [ID: {}, name: {}]", snapshot.getId(), snapshot.getName()); StorPoolUtil.spLog("Volume create failed with error=%s", res.getError().getDescr()); err = res.getError().getDescr(); } else { @@ -221,7 +221,7 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, if (answer != null && answer.getResult()) { SpApiResponse resSnapshot = StorPoolUtil.volumeFreeze(volumeName, conn); if (resSnapshot.getError() != null) { - logger.debug(String.format("Could not snapshot volume with ID=%s", snapshot.getId())); + logger.debug("Could not snapshot volume [id: {}, name: {}]", snapshot.getId(), snapshot.getName()); StorPoolUtil.spLog("Volume freeze failed with error=%s", resSnapshot.getError().getDescr()); err = resSnapshot.getError().getDescr(); StorPoolUtil.volumeDelete(volumeName, conn); @@ -297,7 +297,7 @@ public void copyAsync(Map volumeDataStoreMap, VirtualMach for (Map.Entry entry : volumeDataStoreMap.entrySet()) { VolumeInfo srcVolumeInfo = entry.getKey(); if (srcVolumeInfo.getPassphraseId() != null) { - throw new CloudRuntimeException(String.format("Cannot live migrate encrypted volume [%s] to StorPool", 
srcVolumeInfo.getName())); + throw new CloudRuntimeException(String.format("Cannot live migrate encrypted volume [%s] to StorPool", srcVolumeInfo.getVolume())); } DataStore destDataStore = entry.getValue(); @@ -388,7 +388,7 @@ public void copyAsync(Map volumeDataStoreMap, VirtualMach errMsg = String.format( "Copy volume(s) of VM [%s] to storage(s) [%s] and VM to host [%s] failed in StorPoolDataMotionStrategy.copyAsync. Error message: [%s].", - vmTO.getId(), srcHost.getId(), destHost.getId(), ex.getMessage()); + vmTO, srcHost, destHost, ex.getMessage()); logger.error(errMsg, ex); throw new CloudRuntimeException(errMsg); @@ -524,13 +524,13 @@ private String generateDestPath(Host destHost, StoragePoolVO destStoragePool, Vo private String connectHostToVolume(Host host, long storagePoolId, String iqn) { ModifyTargetsCommand modifyTargetsCommand = getModifyTargetsCommand(storagePoolId, iqn, true); - return sendModifyTargetsCommand(modifyTargetsCommand, host.getId()).get(0); + return sendModifyTargetsCommand(modifyTargetsCommand, host).get(0); } private void disconnectHostFromVolume(Host host, long storagePoolId, String iqn) { ModifyTargetsCommand modifyTargetsCommand = getModifyTargetsCommand(storagePoolId, iqn, false); - sendModifyTargetsCommand(modifyTargetsCommand, host.getId()); + sendModifyTargetsCommand(modifyTargetsCommand, host); } private ModifyTargetsCommand getModifyTargetsCommand(long storagePoolId, String iqn, boolean add) { @@ -558,15 +558,15 @@ private ModifyTargetsCommand getModifyTargetsCommand(long storagePoolId, String return cmd; } - private List sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { - ModifyTargetsAnswer modifyTargetsAnswer = (ModifyTargetsAnswer) _agentManager.easySend(hostId, cmd); + private List sendModifyTargetsCommand(ModifyTargetsCommand cmd, Host host) { + ModifyTargetsAnswer modifyTargetsAnswer = (ModifyTargetsAnswer) _agentManager.easySend(host.getId(), cmd); if (modifyTargetsAnswer == null) { throw new 
CloudRuntimeException("Unable to get an answer to the modify targets command"); } if (!modifyTargetsAnswer.getResult()) { - String msg = "Unable to modify targets on the following host: " + hostId; + String msg = String.format("Unable to modify targets on the following host: %s", host); throw new CloudRuntimeException(msg); } diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolSnapshotStrategy.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolSnapshotStrategy.java index c7bcc8a46b7f..5ec86df91e17 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolSnapshotStrategy.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolSnapshotStrategy.java @@ -121,7 +121,7 @@ public boolean deleteSnapshot(Long snapshotId, Long zoneId) { } else { res = deleteSnapshotFromDbIfNeeded(snapshotVO, zoneId); markSnapshotAsDestroyedIfAlreadyRemoved(snapshotId,true); - StorPoolUtil.spLog("StorpoolSnapshotStrategy.deleteSnapshot: executed successfully=%s, snapshot uuid=%s, name=%s", res, snapshotVO.getUuid(), name); + StorPoolUtil.spLog("StorpoolSnapshotStrategy.deleteSnapshot: executed successfully=%s, snapshot %s, name=%s", res, snapshotVO, name); } } catch (Exception e) { String errMsg = String.format("Cannot delete snapshot due to %s", e.getMessage()); @@ -152,7 +152,7 @@ private void markSnapshotAsDestroyedIfAlreadyRemoved(Long snapshotId, boolean is @Override public StrategyPriority canHandle(Snapshot snapshot, Long zoneId, SnapshotOperation op) { - logger.debug(String.format("StorpoolSnapshotStrategy.canHandle: snapshot=%s, uuid=%s, op=%s", snapshot.getName(), snapshot.getUuid(), op)); + logger.debug("StorpoolSnapshotStrategy.canHandle: snapshot {}, op={}", snapshot, op); if (op != SnapshotOperation.DELETE) { return StrategyPriority.CANT_HANDLE; @@ -181,7 +181,7 @@ public 
StrategyPriority canHandle(Snapshot snapshot, Long zoneId, SnapshotOperat } private boolean deleteSnapshotChain(SnapshotInfo snapshot) { - logger.debug("delete snapshot chain for snapshot: " + snapshot.getId()); + logger.debug("delete snapshot chain for snapshot: {}", snapshot); final SnapshotInfo snapOnImage = snapshot; boolean result = false; boolean resultIsSet = false; @@ -194,7 +194,7 @@ private boolean deleteSnapshotChain(SnapshotInfo snapshot) { logger.debug("the snapshot has child, can't delete it on the storage"); break; } - logger.debug("Snapshot: " + snapshot.getId() + " doesn't have children, so it's ok to delete it and its parents"); + logger.debug("Snapshot: {} doesn't have children, so it's ok to delete it and its parents", snapshot); SnapshotInfo parent = snapshot.getParent(); boolean deleted = false; if (parent != null) { @@ -216,7 +216,7 @@ private boolean deleteSnapshotChain(SnapshotInfo snapshot) { if (r) { List cacheSnaps = snapshotDataFactory.listSnapshotOnCache(snapshot.getId()); for (SnapshotInfo cacheSnap : cacheSnaps) { - logger.debug("Delete snapshot " + snapshot.getId() + " from image cache store: " + cacheSnap.getDataStore().getName()); + logger.debug("Delete snapshot {} from image cache store: {}", snapshot, cacheSnap.getDataStore()); cacheSnap.delete(); } } @@ -335,7 +335,7 @@ private boolean deleteSnapshotFromDbIfNeeded(SnapshotVO snapshotVO, Long zoneId) if (!Snapshot.State.BackedUp.equals(snapshotVO.getState()) && !Snapshot.State.Error.equals(snapshotVO.getState()) && !Snapshot.State.Destroying.equals(snapshotVO.getState())) { - throw new InvalidParameterValueException("Can't delete snapshot " + snapshotId + " due to it is in " + snapshotVO.getState() + " Status"); + throw new InvalidParameterValueException(String.format("Can't delete snapshot %s due to it is in %s Status", snapshotVO, snapshotVO.getState())); } List storeRefs = _snapshotStoreDao.listReadyBySnapshot(snapshotId, DataStoreRole.Image); if (zoneId != null) { diff --git 
a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolVMSnapshotStrategy.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolVMSnapshotStrategy.java index 2596b6a5bde7..e5b24a3f98ce 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolVMSnapshotStrategy.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolVMSnapshotStrategy.java @@ -148,7 +148,7 @@ public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { vmSnapshot.getId(), StorPoolUtil.SP_STORAGE_POOL_ID, String.valueOf(poolId), false); vmSnapshotDetailsDao.persist(vmSnapshotDetailStoragePoolId); } - StorPoolUtil.spLog("Snapshot=%s of volume=%s for a group snapshot=%s.", snapshot, vol.getUuid(), vmSnapshot.getUuid()); + StorPoolUtil.spLog("Snapshot=%s of volume=%s for a group snapshot=%s.", snapshot, vol, vmSnapshot); } } } @@ -237,8 +237,8 @@ public boolean deleteVMSnapshot(VMSnapshot vmSnapshot) { VMSnapshotDetailsVO snapshotDetailsVO = vmSnapshotDetailsDao.findDetail(vmSnapshot.getId(), volumeObjectTO.getUuid()); String snapshotName = StorPoolStorageAdaptor.getVolumeNameFromPath(snapshotDetailsVO.getValue(), true); if (snapshotName == null) { - err = String.format("Could not find StorPool's snapshot vm snapshot uuid=%s and volume uui=%s", - vmSnapshot.getUuid(), volumeObjectTO.getUuid()); + err = String.format("Could not find StorPool's snapshot vm snapshot %s and volume [id: %s, uuid: %s, name: %s]", + vmSnapshot, volumeObjectTO.getId(), volumeObjectTO.getUuid(), volumeObjectTO.getName()); logger.error("Could not delete snapshot for vm:" + err); } StorPoolUtil.spLog("StorpoolVMSnapshotStrategy.deleteVMSnapshot snapshotName=%s", snapshotName); @@ -254,10 +254,9 @@ public boolean deleteVMSnapshot(VMSnapshot vmSnapshot) { } if (err != null) { StorPoolUtil.spLog( - "StorpoolVMSnapshotStrategy.deleteVMSnapshot delete 
snapshot=%s of gropusnapshot=%s failed due to %s", - snapshotName, userVm.getInstanceName(), err); - throw new CloudRuntimeException("Delete vm snapshot " + vmSnapshot.getName() + " of vm " - + userVm.getInstanceName() + " failed due to " + err); + "StorpoolVMSnapshotStrategy.deleteVMSnapshot delete snapshot=%s of group snapshot=%s failed due to %s", + snapshotName, userVm, err); + throw new CloudRuntimeException(String.format("Delete vm snapshot %s of vm %s failed due to %s", vmSnapshot, userVm, err)); } } vmSnapshotDetailsDao.removeDetails(vmSnapshot.getId()); @@ -344,7 +343,7 @@ public boolean revertVMSnapshot(VMSnapshot vmSnapshot) { finalizeRevert(vmSnapshotVO, volumeTOs); result = vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationSucceeded); } catch (CloudRuntimeException | NoTransitionException e) { - String errMsg = String.format("Error while finalize create vm snapshot [%s] due to %s", vmSnapshot.getName(), e.getMessage()); + String errMsg = String.format("Error while finalize create vm snapshot [%s] due to %s", vmSnapshot, e.getMessage()); logger.error(errMsg, e); throw new CloudRuntimeException(errMsg); } finally { diff --git a/server/src/main/java/org/apache/cloudstack/diagnostics/to/DiagnosticsDataObject.java b/server/src/main/java/org/apache/cloudstack/diagnostics/to/DiagnosticsDataObject.java index de66ad4d5e69..dd91891f8343 100644 --- a/server/src/main/java/org/apache/cloudstack/diagnostics/to/DiagnosticsDataObject.java +++ b/server/src/main/java/org/apache/cloudstack/diagnostics/to/DiagnosticsDataObject.java @@ -34,6 +34,14 @@ public DiagnosticsDataObject(DataTO dataTO, DataStore dataStore) { this.dataStore = dataStore; } + @Override + public String toString() { + return "DiagnosticsDataObject{" + + "dataTO=" + dataTO + + ", dataStore=" + getDataStore() + + '}'; + } + @Override public long getId() { return 0; From 785c06424cd4b287a99d85f54a49c09493491829 Mon Sep 17 00:00:00 2001 From: Vishesh Date: Mon, 16 Dec 2024 
17:43:46 +0530 Subject: [PATCH 15/22] Improve logging to include more identifiable information for Cmd classes --- .../api/command/admin/host/UpdateHostCmd.java | 5 +++-- .../snapshot/CreateSnapshotFromVMSnapshotCmd.java | 15 ++++++++------- .../cluster/DeleteKubernetesClusterCmd.java | 3 ++- .../cluster/ScaleKubernetesClusterCmd.java | 3 ++- .../cluster/StopKubernetesClusterCmd.java | 3 ++- .../cluster/UpgradeKubernetesClusterCmd.java | 3 ++- .../api/command/LdapImportUsersCmd.java | 2 +- .../api/command/LinkAccountToLdapCmd.java | 6 +++--- .../api/command/LinkDomainToLdapCmd.java | 6 +++--- 9 files changed, 26 insertions(+), 20 deletions(-) diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/UpdateHostCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/UpdateHostCmd.java index 88eeadb9b139..397f9c80735e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/UpdateHostCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/UpdateHostCmd.java @@ -125,8 +125,9 @@ public void execute() { hostResponse.setResponseName(getCommandName()); this.setResponseObject(hostResponse); } catch (Exception e) { - logger.debug("Failed to update host:" + getId(), e); - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to update host:" + getId() + "," + e.getMessage()); + Host host = _entityMgr.findById(Host.class, getId()); + logger.debug("Failed to update host: {} with id {}", host, getId(), e); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to update host: %s with id %d, %s", host, getId(), e.getMessage())); } } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotFromVMSnapshotCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotFromVMSnapshotCmd.java index 6bebdc09f59b..f281fbaea2e3 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotFromVMSnapshotCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotFromVMSnapshotCmd.java @@ -124,10 +124,10 @@ public long getEntityOwnerId() { if (account.getType() == Account.Type.PROJECT) { Project project = _projectService.findByProjectAccountId(vmsnapshot.getAccountId()); if (project == null) { - throw new InvalidParameterValueException("Unable to find project by account id=" + account.getUuid()); + throw new InvalidParameterValueException(String.format("Unable to find project by account %s", account)); } if (project.getState() != Project.State.Active) { - throw new PermissionDeniedException("Can't add resources to the project id=" + project.getUuid() + " in state=" + project.getState() + " as it's no longer active"); + throw new PermissionDeniedException(String.format("Can't add resources to the project %s in state=%s as it's no longer active", project, project.getState())); } } else if (account.getState() == Account.State.DISABLED) { throw new PermissionDeniedException("The owner of template is disabled: " + account); @@ -164,8 +164,9 @@ public void create() throws ResourceAllocationException { @Override public void execute() { - logger.info("CreateSnapshotFromVMSnapshotCmd with vm snapshot id:" + getVMSnapshotId() + " and snapshot id:" + getEntityId() + " starts:" + System.currentTimeMillis()); - CallContext.current().setEventDetails("Vm Snapshot Id: "+ this._uuidMgr.getUuid(VMSnapshot.class, getVMSnapshotId())); + VMSnapshot vmSnapshot = _vmSnapshotService.getVMSnapshotById(getVMSnapshotId()); + logger.info("CreateSnapshotFromVMSnapshotCmd with vm snapshot {} with id {} and snapshot [id: {}, uuid: {}] starts: {}", vmSnapshot, getVMSnapshotId(), getEntityId(), getEntityUuid(), System.currentTimeMillis()); + CallContext.current().setEventDetails("Vm Snapshot Id: "+ vmSnapshot.getUuid()); Snapshot snapshot = null; try { snapshot = 
_snapshotService.backupSnapshotFromVmSnapshot(getEntityId(), getVmId(), getVolumeId(), getVMSnapshotId()); @@ -174,19 +175,19 @@ public void execute() { response.setResponseName(getCommandName()); this.setResponseObject(response); } else { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create snapshot due to an internal error creating snapshot from vm snapshot " + getVMSnapshotId()); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to create snapshot due to an internal error creating snapshot from vm snapshot %s", vmSnapshot)); } } catch (InvalidParameterValueException ex) { throw ex; } catch (Exception e) { logger.debug("Failed to create snapshot", e); - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create snapshot due to an internal error creating snapshot from vm snapshot " + getVMSnapshotId()); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to create snapshot due to an internal error creating snapshot from vm snapshot %s", vmSnapshot)); } finally { if (snapshot == null) { try { _snapshotService.deleteSnapshot(getEntityId(), null); } catch (Exception e) { - logger.debug("Failed to clean failed snapshot" + getEntityId()); + logger.debug("Failed to clean failed snapshot {} with id {}", () -> _entityMgr.findById(Snapshot.class, getEntityId()), this::getEntityId); } } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/DeleteKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/DeleteKubernetesClusterCmd.java index 2ce8151c0631..ca7eb985be0c 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/DeleteKubernetesClusterCmd.java +++ 
b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/DeleteKubernetesClusterCmd.java @@ -93,7 +93,8 @@ public Boolean getExpunge() { public void execute() throws ServerApiException, ConcurrentOperationException { try { if (!kubernetesClusterService.deleteKubernetesCluster(this)) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to delete Kubernetes cluster ID: %d", getId())); + KubernetesCluster cluster = kubernetesClusterService.findById(getId()); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to delete Kubernetes cluster %s with id: %d", cluster, getId())); } SuccessResponse response = new SuccessResponse(getCommandName()); setResponseObject(response); diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java index ee3566564e41..59c2bebf961d 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java @@ -158,7 +158,8 @@ public ApiCommandResourceType getApiResourceType() { public void execute() throws ServerApiException, ConcurrentOperationException { try { if (!kubernetesClusterService.scaleKubernetesCluster(this)) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to scale Kubernetes cluster ID: %d", getId())); + KubernetesCluster cluster = kubernetesClusterService.findById(getId()); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to scale Kubernetes cluster %s with id %d", cluster, 
getId())); } final KubernetesClusterResponse response = kubernetesClusterService.createKubernetesClusterResponse(getId()); response.setResponseName(getCommandName()); diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StopKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StopKubernetesClusterCmd.java index 23d6878cf729..7da778534969 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StopKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StopKubernetesClusterCmd.java @@ -100,7 +100,8 @@ public ApiCommandResourceType getApiResourceType() { public void execute() throws ServerApiException, ConcurrentOperationException { try { if (!kubernetesClusterService.stopKubernetesCluster(this)) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to start Kubernetes cluster ID: %d", getId())); + KubernetesCluster cluster = kubernetesClusterService.findById(getId()); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to start Kubernetes cluster %s with id %d", cluster, getId())); } final SuccessResponse response = new SuccessResponse(getCommandName()); setResponseObject(response); diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/UpgradeKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/UpgradeKubernetesClusterCmd.java index a3f2e0576459..04a2075c50df 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/UpgradeKubernetesClusterCmd.java +++ 
b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/UpgradeKubernetesClusterCmd.java @@ -110,7 +110,8 @@ public ApiCommandResourceType getApiResourceType() { public void execute() throws ServerApiException, ConcurrentOperationException { try { if (!kubernetesClusterService.upgradeKubernetesCluster(this)) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to upgrade Kubernetes cluster ID: %d", getId())); + KubernetesCluster cluster = kubernetesClusterService.findById(getId()); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to upgrade Kubernetes cluster %s with id %d", cluster, getId())); } final KubernetesClusterResponse response = kubernetesClusterService.createKubernetesClusterResponse(getId()); response.setResponseName(getCommandName()); diff --git a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapImportUsersCmd.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapImportUsersCmd.java index 087bd63c2969..eada5f6df39b 100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapImportUsersCmd.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapImportUsersCmd.java @@ -117,7 +117,7 @@ private void createCloudstackUserAccount(LdapUser user, String accountName, Doma _accountService.createUser(user.getUsername(), generatePassword(), user.getFirstname(), user.getLastname(), user.getEmail(), timezone, accountName, domain.getId(), UUID.randomUUID().toString(), User.Source.LDAP); } else { - logger.debug("Account [name=%s] and user [name=%s] already exist in CloudStack. Executing the user update."); + logger.debug("Account [name={}] and user [name={}] already exist in CloudStack. 
Executing the user update.", account, csuser); UpdateUserCmd updateUserCmd = new UpdateUserCmd(); updateUserCmd.setId(csuser.getId()); diff --git a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkAccountToLdapCmd.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkAccountToLdapCmd.java index 6219fc90f810..52ece5c44f43 100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkAccountToLdapCmd.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkAccountToLdapCmd.java @@ -91,12 +91,12 @@ public void execute() throws ServerApiException { .createUserAccount(admin, "", ldapUser.getFirstname(), ldapUser.getLastname(), ldapUser.getEmail(), null, admin, Account.Type.DOMAIN_ADMIN, RoleType.DomainAdmin.getId(), domainId, null, null, UUID.randomUUID().toString(), UUID.randomUUID().toString(), User.Source.LDAP); response.setAdminId(String.valueOf(userAccount.getAccountId())); - logger.info("created an account with name " + admin + " in the given domain " + domainId); + logger.info("created an account with name {} in the given domain {} with id {}", admin, _domainService.getDomain(domainId), domainId); } catch (Exception e) { - logger.info("an exception occurred while creating account with name " + admin + " in domain " + domainId, e); + logger.info("an exception occurred while creating account with name {} in domain {} with id {}", admin, _domainService.getDomain(domainId), domainId, e); } } else { - logger.debug("an account with name " + admin + " already exists in the domain " + domainId); + logger.debug("an account with name {} already exists in the domain {} with id {}", admin, _domainService.getDomain(domainId), domainId); } } else { logger.debug("ldap user with username " + admin + " is disabled in the given group/ou"); diff --git 
a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkDomainToLdapCmd.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkDomainToLdapCmd.java index d5187f99c995..c351924de6de 100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkDomainToLdapCmd.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkDomainToLdapCmd.java @@ -107,12 +107,12 @@ public void execute() throws ServerApiException { UserAccount userAccount = _accountService.createUserAccount(admin, "", ldapUser.getFirstname(), ldapUser.getLastname(), ldapUser.getEmail(), null, admin, Account.Type.DOMAIN_ADMIN, RoleType.DomainAdmin.getId(), domainId, null, null, UUID.randomUUID().toString(), UUID.randomUUID().toString(), User.Source.LDAP); response.setAdminId(String.valueOf(userAccount.getAccountId())); - logger.info("created an account with name " + admin + " in the given domain " + domainId); + logger.info("created an account with name {} in the given domain {} with id {}", admin, _domainService.getDomain(domainId), domainId); } catch (Exception e) { - logger.info("an exception occurred while creating account with name " + admin +" in domain " + domainId, e); + logger.info("an exception occurred while creating account with name {} in domain {} with id {}", admin, _domainService.getDomain(domainId), domainId, e); } } else { - logger.debug("an account with name " + admin + " already exists in the domain " + domainId); + logger.debug("an account with name {} already exists in the domain {} with id {}", admin, _domainService.getDomain(domainId), domainId); } } else { logger.debug("ldap user with username "+admin+" is disabled in the given group/ou"); From c197b83a2badb087359d48811a13dd6ae88e4148 Mon Sep 17 00:00:00 2001 From: Vishesh Date: Tue, 17 Dec 2024 10:23:15 +0530 Subject: [PATCH 16/22] Minor fixups --- .../com/cloud/network/rules/RulesManager.java 
| 5 +- .../cloud/vm/VirtualMachineManagerImpl.java | 14 ++-- .../vm/VirtualMachinePowerStateSync.java | 5 +- .../vm/VirtualMachinePowerStateSyncImpl.java | 80 ++++++++++--------- .../orchestration/NetworkOrchestrator.java | 4 +- .../com/cloud/storage/SnapshotScheduleVO.java | 2 - .../cloud/vm/dao/VMInstanceDaoImplTest.java | 20 +++-- .../storage/volume/VolumeObject.java | 2 +- .../kvm/storage/KVMStorageProcessor.java | 2 +- .../KubernetesClusterActionWorker.java | 8 +- ...esClusterResourceModifierActionWorker.java | 33 ++++---- .../KubernetesClusterStartWorker.java | 24 +++--- .../KubernetesClusterStopWorker.java | 5 +- .../api/command/LinkAccountToLdapCmdTest.java | 4 + .../api/command/LinkDomainToLdapCmdTest.java | 4 + .../com/cloud/alert/AlertManagerImpl.java | 21 ++++- .../com/cloud/network/Ipv6ServiceImpl.java | 1 - .../network/NetworkMigrationManagerImpl.java | 2 +- .../lb/LoadBalancingRulesManagerImpl.java | 8 +- .../cloud/network/rules/RulesManagerImpl.java | 18 ++--- .../cloud/storage/VolumeApiServiceImpl.java | 2 +- .../java/com/cloud/vm/UserVmManagerImpl.java | 7 +- .../apache/cloudstack/ha/HAManagerImpl.java | 7 +- .../com/cloud/alert/AlertManagerImplTest.java | 25 ++++++ .../cloud/network/NetworkServiceImplTest.java | 2 +- .../network/as/AutoScaleManagerImplTest.java | 6 +- .../HypervisorTemplateAdapterTest.java | 13 ++- .../com/cloud/vm/UserVmManagerImplTest.java | 1 - .../vm/UnmanagedVMsManagerImplTest.java | 1 - 29 files changed, 184 insertions(+), 142 deletions(-) diff --git a/engine/components-api/src/main/java/com/cloud/network/rules/RulesManager.java b/engine/components-api/src/main/java/com/cloud/network/rules/RulesManager.java index c77874329fce..79ffdfdb9737 100644 --- a/engine/components-api/src/main/java/com/cloud/network/rules/RulesManager.java +++ b/engine/components-api/src/main/java/com/cloud/network/rules/RulesManager.java @@ -22,6 +22,7 @@ import com.cloud.exception.NetworkRuleConflictException; import 
com.cloud.exception.ResourceUnavailableException; import com.cloud.network.IpAddress; +import com.cloud.network.Network; import com.cloud.user.Account; import com.cloud.uservm.UserVm; import com.cloud.vm.Nic; @@ -47,7 +48,7 @@ public interface RulesManager extends RulesService { FirewallRule[] reservePorts(IpAddress ip, String protocol, FirewallRule.Purpose purpose, boolean openFirewall, Account caller, int... ports) throws NetworkRuleConflictException; - boolean applyStaticNatsForNetwork(long networkId, boolean continueOnError, Account caller); + boolean applyStaticNatsForNetwork(Network network, boolean continueOnError, Account caller); void getSystemIpAndEnableStaticNatForVm(VirtualMachine vm, boolean getNewIp) throws InsufficientAddressCapacityException; @@ -60,7 +61,7 @@ FirewallRule[] reservePorts(IpAddress ip, String protocol, FirewallRule.Purpose * @param forRevoke * @return */ - boolean applyStaticNatForNetwork(long networkId, boolean continueOnError, Account caller, boolean forRevoke); + boolean applyStaticNatForNetwork(Network network, boolean continueOnError, Account caller, boolean forRevoke); List listAssociatedRulesForGuestNic(Nic nic); diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 1bf7ef5065e4..79e6d2a34b36 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -3838,7 +3838,7 @@ public void processConnect(final Host agent, final StartupCommand cmd, final boo logger.debug("Received startup command from hypervisor host. 
host: {}", agent); - _syncMgr.resetHostSyncState(agent.getId()); + _syncMgr.resetHostSyncState(agent); if (forRebalance) { logger.debug("Not processing listener {} as connect happens on rebalance process", this); @@ -5795,18 +5795,20 @@ private Pair findClusterAndHostIdForVmFromVolumes(long vmId) { @Override public Pair findClusterAndHostIdForVm(VirtualMachine vm, boolean skipCurrentHostForStartingVm) { Long hostId = null; + Host host = null; if (!skipCurrentHostForStartingVm || !State.Starting.equals(vm.getState())) { hostId = vm.getHostId(); } Long clusterId = null; if(hostId == null) { + if (vm.getLastHostId() == null) { + return findClusterAndHostIdForVmFromVolumes(vm.getId()); + } hostId = vm.getLastHostId(); - logger.debug("host id is null, using last host id {}", hostId); - } - if (hostId == null) { - return findClusterAndHostIdForVmFromVolumes(vm.getId()); + host = _hostDao.findById(hostId); + logger.debug("host id is null, using last host {} with id {}", host, hostId); } - HostVO host = _hostDao.findById(hostId); + host = host == null ? 
_hostDao.findById(hostId) : host; if (host != null) { clusterId = host.getClusterId(); } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSync.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSync.java index b2a48a026a3c..0f399cf4381a 100644 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSync.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSync.java @@ -19,15 +19,14 @@ import java.util.Map; import com.cloud.agent.api.HostVmStateReportEntry; +import com.cloud.host.Host; public interface VirtualMachinePowerStateSync { - void resetHostSyncState(long hostId); + void resetHostSyncState(Host host); void processHostVmStateReport(long hostId, Map report); // to adapt legacy ping report void processHostVmStatePingReport(long hostId, Map report, boolean force); - - Map convertVmStateReport(Map states); } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java index 4c89a75d2151..94dddfdf18ab 100644 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java @@ -24,6 +24,10 @@ import javax.inject.Inject; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.utils.Pair; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.PublishScope; import org.apache.logging.log4j.Logger; @@ -40,54 +44,57 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat @Inject MessageBus _messageBus; @Inject VMInstanceDao _instanceDao; + @Inject HostDao hostDao; @Inject ManagementServiceConfiguration mgmtServiceConf; public VirtualMachinePowerStateSyncImpl() { } @Override -
public void resetHostSyncState(long hostId) { - logger.info("Reset VM power state sync for host: {}.", hostId); - _instanceDao.resetHostPowerStateTracking(hostId); + public void resetHostSyncState(Host host) { + logger.info("Reset VM power state sync for host: {}", host); + _instanceDao.resetHostPowerStateTracking(host.getId()); } @Override public void processHostVmStateReport(long hostId, Map report) { - logger.debug("Process host VM state report. host: {}.", hostId); + HostVO host = hostDao.findById(hostId); + logger.debug("Process host VM state report. host: {}", host); - Map translatedInfo = convertVmStateReport(report); - processReport(hostId, translatedInfo, false); + Map> translatedInfo = convertVmStateReport(report); + processReport(host, translatedInfo, false); } @Override public void processHostVmStatePingReport(long hostId, Map report, boolean force) { - logger.debug("Process host VM state report from ping process. host: {}.", hostId); + HostVO host = hostDao.findById(hostId); + logger.debug("Process host VM state report from ping process. host: {}", host); - Map translatedInfo = convertVmStateReport(report); - processReport(hostId, translatedInfo, force); + Map> translatedInfo = convertVmStateReport(report); + processReport(host, translatedInfo, force); } - private void processReport(long hostId, Map translatedInfo, boolean force) { + private void processReport(HostVO host, Map> translatedInfo, boolean force) { - logger.debug("Process VM state report. host: {}, number of records in report: {}.", hostId, translatedInfo.size()); + logger.debug("Process VM state report. host: {}, number of records in report: {}.", host, translatedInfo.size()); - for (Map.Entry entry : translatedInfo.entrySet()) { + for (Map.Entry> entry : translatedInfo.entrySet()) { - logger.debug("VM state report. host: {}, vm id: {}, power state: {}.", hostId, entry.getKey(), entry.getValue()); + logger.debug("VM state report. 
host: {}, vm: {}, power state: {}", host, entry.getValue().second(), entry.getValue().first()); - if (_instanceDao.updatePowerState(entry.getKey(), hostId, entry.getValue(), DateUtil.currentGMTTime())) { - logger.debug("VM state report is updated. host: {}, vm id: {}, power state: {}.", hostId, entry.getKey(), entry.getValue()); + if (_instanceDao.updatePowerState(entry.getKey(), host.getId(), entry.getValue().first(), DateUtil.currentGMTTime())) { + logger.debug("VM state report is updated. host: {}, vm: {}, power state: {}", host, entry.getValue().second(), entry.getValue().first()); _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, entry.getKey()); } else { - logger.trace("VM power state does not change, skip DB writing. vm id: {}.", entry.getKey()); + logger.trace("VM power state does not change, skip DB writing. vm: {}", entry.getValue().second()); } } // any state outdates should be checked against the time before this list was retrieved Date startTime = DateUtil.currentGMTTime(); // for all running/stopping VMs, we provide monitoring of missing report - List vmsThatAreMissingReport = _instanceDao.findByHostInStates(hostId, VirtualMachine.State.Running, + List vmsThatAreMissingReport = _instanceDao.findByHostInStates(host.getId(), VirtualMachine.State.Running, VirtualMachine.State.Stopping, VirtualMachine.State.Starting); java.util.Iterator it = vmsThatAreMissingReport.iterator(); while (it.hasNext()) { @@ -99,7 +106,7 @@ private void processReport(long hostId, Map tra // here we need to be wary of out of band migration as opposed to other, more unexpected state changes if (vmsThatAreMissingReport.size() > 0) { Date currentTime = DateUtil.currentGMTTime(); - logger.debug("Run missing VM report. current time: {}", currentTime.getTime()); + logger.debug("Run missing VM report for host {}. 
current time: {}", host, currentTime.getTime()); // 2 times of sync-update interval for graceful period long milliSecondsGracefullPeriod = mgmtServiceConf.getPingInterval() * 2000L; @@ -109,60 +116,55 @@ private void processReport(long hostId, Map tra // Make sure powerState is up to date for missing VMs try { if (!force && !_instanceDao.isPowerStateUpToDate(instance.getId())) { - logger.warn("Detected missing VM but power state is outdated, wait for another process report run for VM id: {}.", instance.getId()); + logger.warn("Detected missing VM but power state is outdated, wait for another process report run for VM: {}", instance); _instanceDao.resetVmPowerStateTracking(instance.getId()); continue; } } catch (CloudRuntimeException e) { - logger.warn("Checked for missing powerstate of a none existing vm", e); + logger.warn("Checked for missing powerstate of a none existing vm {}", instance, e); continue; } Date vmStateUpdateTime = instance.getPowerStateUpdateTime(); if (vmStateUpdateTime == null) { - logger.warn("VM power state update time is null, falling back to update time for vm id: {}.", instance.getId()); + logger.warn("VM power state update time is null, falling back to update time for vm: {}", instance); vmStateUpdateTime = instance.getUpdateTime(); if (vmStateUpdateTime == null) { - logger.warn("VM update time is null, falling back to creation time for vm id: {}", instance.getId()); + logger.warn("VM update time is null, falling back to creation time for vm: {}", instance); vmStateUpdateTime = instance.getCreated(); } } String lastTime = new SimpleDateFormat("yyyy/MM/dd'T'HH:mm:ss.SSS'Z'").format(vmStateUpdateTime); - logger.debug("Detected missing VM. host: {}, vm id: {}({}), power state: {}, last state update: {}" - , hostId - , instance.getId() - , instance.getUuid() - , VirtualMachine.PowerState.PowerReportMissing - , lastTime); + logger.debug("Detected missing VM. 
host: {}, vm: {}, power state: {}, last state update: {}", + host, instance, VirtualMachine.PowerState.PowerReportMissing, lastTime); long milliSecondsSinceLastStateUpdate = currentTime.getTime() - vmStateUpdateTime.getTime(); if (force || milliSecondsSinceLastStateUpdate > milliSecondsGracefullPeriod) { - logger.debug("vm id: {} - time since last state update({}ms) has passed graceful period.", instance.getId(), milliSecondsSinceLastStateUpdate); + logger.debug("vm: {} - time since last state update({}ms) has passed graceful period", instance, milliSecondsSinceLastStateUpdate); // this is were a race condition might have happened if we don't re-fetch the instance; // between the startime of this job and the currentTime of this missing-branch // an update might have occurred that we should not override in case of out of band migration - if (_instanceDao.updatePowerState(instance.getId(), hostId, VirtualMachine.PowerState.PowerReportMissing, startTime)) { - logger.debug("VM state report is updated. host: {}, vm id: {}, power state: PowerReportMissing.", hostId, instance.getId()); + if (_instanceDao.updatePowerState(instance.getId(), host.getId(), VirtualMachine.PowerState.PowerReportMissing, startTime)) { + logger.debug("VM state report is updated. host: {}, vm: {}, power state: PowerReportMissing", host, instance); _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, instance.getId()); } else { - logger.debug("VM power state does not change, skip DB writing. 
vm: {}", instance); } } else { - logger.debug("vm id: {} - time since last state update({}ms) has not passed graceful period yet.", instance.getId(), milliSecondsSinceLastStateUpdate); + logger.debug("vm: {} - time since last state update({} ms) has not passed graceful period yet", instance, milliSecondsSinceLastStateUpdate); } } } - logger.debug("Done with process of VM state report. host: {}", hostId); + logger.debug("Done with process of VM state report. host: {}", host); } - @Override - public Map convertVmStateReport(Map states) { - final HashMap map = new HashMap(); + public Map> convertVmStateReport(Map states) { + final HashMap> map = new HashMap<>(); if (states == null) { return map; } @@ -170,9 +172,9 @@ public Map convertVmStateReport(Map entry : states.entrySet()) { VMInstanceVO vm = findVM(entry.getKey()); if (vm != null) { - map.put(vm.getId(), entry.getValue().getState()); + map.put(vm.getId(), new Pair<>(entry.getValue().getState(), vm)); } else { - logger.debug("Unable to find matched VM in CloudStack DB. name: {}", entry.getKey()); + logger.debug("Unable to find matched VM in CloudStack DB. 
name: {} powerstate: {}", entry.getKey(), entry.getValue()); } } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java index 7270742c1364..7efc29b02a63 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java @@ -1761,7 +1761,7 @@ protected boolean reprogramNetworkRules(final long networkId, final Account call // apply static nat - if (!_rulesMgr.applyStaticNatsForNetwork(networkId, false, caller)) { + if (!_rulesMgr.applyStaticNatsForNetwork(network, false, caller)) { logger.warn("Failed to apply static nats a part of network {} restart", network); success = false; } @@ -4215,7 +4215,7 @@ private boolean shutdownNetworkResources(final Network network, final Account ca } //release all static nats for the network - if (!_rulesMgr.applyStaticNatForNetwork(network.getId(), false, caller, true)) { + if (!_rulesMgr.applyStaticNatForNetwork(network, false, caller, true)) { logger.warn("Failed to disable static nats as part of shutdownNetworkRules for network {}", network); success = false; } diff --git a/engine/schema/src/main/java/com/cloud/storage/SnapshotScheduleVO.java b/engine/schema/src/main/java/com/cloud/storage/SnapshotScheduleVO.java index dc2694cfbb12..5e013e76d3c8 100644 --- a/engine/schema/src/main/java/com/cloud/storage/SnapshotScheduleVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/SnapshotScheduleVO.java @@ -30,8 +30,6 @@ import com.cloud.storage.snapshot.SnapshotSchedule; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; -import org.apache.commons.lang3.builder.ReflectionToStringBuilder; -import org.apache.commons.lang3.builder.ToStringStyle; @Entity @Table(name = 
"snapshot_schedule") diff --git a/engine/schema/src/test/java/com/cloud/vm/dao/VMInstanceDaoImplTest.java b/engine/schema/src/test/java/com/cloud/vm/dao/VMInstanceDaoImplTest.java index 43679081550b..5f8b2dd90eca 100644 --- a/engine/schema/src/test/java/com/cloud/vm/dao/VMInstanceDaoImplTest.java +++ b/engine/schema/src/test/java/com/cloud/vm/dao/VMInstanceDaoImplTest.java @@ -34,10 +34,13 @@ import java.util.Calendar; import java.util.Date; +import com.cloud.host.dao.HostDao; import org.joda.time.DateTime; import org.junit.After; import org.junit.Before; import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.MockitoAnnotations; @@ -49,19 +52,24 @@ import com.cloud.utils.db.SearchCriteria; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; +import org.mockito.junit.MockitoJUnitRunner; /** * Created by sudharma_jain on 3/2/17. */ - +@RunWith(MockitoJUnitRunner.class) public class VMInstanceDaoImplTest { + @InjectMocks @Spy - VMInstanceDaoImpl vmInstanceDao = new VMInstanceDaoImpl(); + VMInstanceDaoImpl vmInstanceDao; @Mock VMInstanceVO vm; + @Mock + HostDao _hostDao; + private AutoCloseable closeable; @Before @@ -111,9 +119,6 @@ public void testUpdatePowerStateDifferentPowerState() { @Test public void testUpdatePowerStateVmNotFound() { - when(vm.getPowerStateUpdateTime()).thenReturn(null); - when(vm.getPowerHostId()).thenReturn(1L); - when(vm.getPowerState()).thenReturn(VirtualMachine.PowerState.PowerOn); doReturn(null).when(vmInstanceDao).findById(anyLong()); boolean result = vmInstanceDao.updatePowerState(1L, 1L, VirtualMachine.PowerState.PowerOff, new Date()); @@ -154,7 +159,6 @@ public void testUpdatePowerStateNoChangeMaxUpdatesValidState() { when(vm.getPowerStateUpdateCount()).thenReturn(MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT); when(vm.getState()).thenReturn(Running); doReturn(vm).when(vmInstanceDao).findById(anyLong()); - 
doReturn(true).when(vmInstanceDao).update(anyLong(), any()); boolean result = vmInstanceDao.updatePowerState(1L, 1L, VirtualMachine.PowerState.PowerOn, new Date()); @@ -170,8 +174,8 @@ public void testUpdatePowerStateNoChangeMaxUpdatesValidState() { public void testUpdatePowerStateNoChangeMaxUpdatesInvalidStateVmStopped() { when(vm.getPowerStateUpdateTime()).thenReturn(null); when(vm.getPowerHostId()).thenReturn(1L); + when(vm.getHostId()).thenReturn(1L); when(vm.getPowerState()).thenReturn(VirtualMachine.PowerState.PowerOn); - when(vm.getPowerStateUpdateCount()).thenReturn(MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT); when(vm.getState()).thenReturn(Stopped); doReturn(vm).when(vmInstanceDao).findById(anyLong()); doReturn(true).when(vmInstanceDao).update(anyLong(), any()); @@ -190,8 +194,8 @@ public void testUpdatePowerStateNoChangeMaxUpdatesInvalidStateVmStopped() { public void testUpdatePowerStateNoChangeMaxUpdatesInvalidStateVmRunning() { when(vm.getPowerStateUpdateTime()).thenReturn(null); when(vm.getPowerHostId()).thenReturn(1L); + when(vm.getHostId()).thenReturn(1L); when(vm.getPowerState()).thenReturn(VirtualMachine.PowerState.PowerOff); - when(vm.getPowerStateUpdateCount()).thenReturn(MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT); when(vm.getState()).thenReturn(Running); doReturn(vm).when(vmInstanceDao).findById(anyLong()); doReturn(true).when(vmInstanceDao).update(anyLong(), any()); diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java index d85ade0143d7..b8f30aff8cfd 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java @@ -900,7 +900,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { volumeVO.setPassphraseId(null); volumeDao.persist(volumeVO); - 
logger.debug(String.format("Checking to see if we can delete passphrase id %s", passphraseId)); + logger.debug("Checking to see if we can delete passphrase id {} for volume {}", passphraseId, volumeVO); List volumes = volumeDao.listVolumesByPassphraseId(passphraseId); if (volumes != null && !volumes.isEmpty()) { diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index 10cec7025139..92e4570170e3 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -2426,7 +2426,7 @@ public Answer handleDownloadTemplateToPrimaryStorage(DirectDownloadCommand cmd) logger.debug("Checking for free space on the host for downloading the template with physical size: " + templateSize + " and virtual size: " + cmd.getTemplateSize()); if (!isEnoughSpaceForDownloadTemplateOnTemporaryLocation(templateSize)) { - String msg = String.format("Not enough space on the defined temporary location to download the template %swith id %d", cmd.getDestData(), cmd.getTemplateId()); + String msg = String.format("Not enough space on the defined temporary location to download the template %s with id %d", cmd.getDestData(), cmd.getTemplateId()); logger.error(msg); return new DirectDownloadAnswer(false, msg, true); } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index 743962a1f00b..076bd105728c 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ 
b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -480,7 +480,7 @@ protected void attachIsoKubernetesVMs(List clusterVMs, final KubernetesS try { templateService.attachIso(iso.getId(), vm.getId(), true); if (logger.isInfoEnabled()) { - logger.info(String.format("Attached binaries ISO for VM : %s in cluster: %s", vm.getDisplayName(), kubernetesCluster.getName())); + logger.info("Attached binaries ISO for VM: {} in cluster: {}", vm, kubernetesCluster); } } catch (CloudRuntimeException ex) { logTransitStateAndThrow(Level.ERROR, String.format("Failed to attach binaries ISO for VM : %s in the Kubernetes cluster name: %s", vm.getDisplayName(), kubernetesCluster.getName()), kubernetesCluster.getId(), failedEvent, ex); @@ -502,17 +502,17 @@ protected void detachIsoKubernetesVMs(List clusterVMs) { try { result = templateService.detachIso(vm.getId(), true); } catch (CloudRuntimeException ex) { - logger.warn(String.format("Failed to detach binaries ISO from VM : %s in the Kubernetes cluster : %s ", vm.getDisplayName(), kubernetesCluster.getName()), ex); + logger.warn("Failed to detach binaries ISO from VM: {} in the Kubernetes cluster: {} ", vm, kubernetesCluster, ex); } finally { CallContext.unregister(); } if (result) { if (logger.isInfoEnabled()) { - logger.info(String.format("Detached Kubernetes binaries from VM : %s in the Kubernetes cluster : %s", vm.getDisplayName(), kubernetesCluster.getName())); + logger.info("Detached Kubernetes binaries from VM: {} in the Kubernetes cluster: {}", vm, kubernetesCluster); } continue; } - logger.warn(String.format("Failed to detach binaries ISO from VM : %s in the Kubernetes cluster : %s ", vm.getDisplayName(), kubernetesCluster.getName())); + logger.warn("Failed to detach binaries ISO from VM: {} in the Kubernetes cluster: {} ", vm, kubernetesCluster); } } diff --git 
a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java index 3f0125728c16..8c983149d02d 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java @@ -274,19 +274,19 @@ protected DeployDestination plan(final long nodesCount, final DataCenter zone, f } if (!suitable_host_found) { if (logger.isInfoEnabled()) { - logger.info(String.format("Suitable hosts not found in datacenter : %s for node %d, with offering : %s and hypervisor: %s", - zone.getName(), i, offering.getName(), clusterTemplate.getHypervisorType().toString())); + logger.info("Suitable hosts not found in datacenter: {} for node {}, with offering: {} and hypervisor: {}", + zone, i, offering, clusterTemplate.getHypervisorType().toString()); } break; } } if (suitable_host_found) { if (logger.isInfoEnabled()) { - logger.info(String.format("Suitable hosts found in datacenter : %s, creating deployment destination", zone.getName())); + logger.info("Suitable hosts found in datacenter: {}, creating deployment destination", zone); } return new DeployDestination(zone, null, null, null); } - String msg = String.format("Cannot find enough capacity for Kubernetes cluster(requested cpu=%d memory=%s) with offering : %s and hypervisor: %s", + String msg = String.format("Cannot find enough capacity for Kubernetes cluster(requested cpu=%d memory=%s) with offering: %s and hypervisor: %s", cpu_requested * nodesCount, toHumanReadableSize(ram_requested * nodesCount), offering.getName(), clusterTemplate.getHypervisorType().toString()); 
logger.warn(msg); @@ -297,7 +297,7 @@ protected DeployDestination plan() throws InsufficientServerCapacityException { ServiceOffering offering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId()); if (logger.isDebugEnabled()) { - logger.debug(String.format("Checking deployment destination for Kubernetes cluster : %s in zone : %s", kubernetesCluster.getName(), zone.getName())); + logger.debug("Checking deployment destination for Kubernetes cluster: {} in zone: {}", kubernetesCluster, zone); } return plan(kubernetesCluster.getTotalNodeCount(), zone, offering); } @@ -362,7 +362,7 @@ protected List provisionKubernetesClusterNodeVms(final long nodeCount, f throw new ManagementServerException(String.format("Failed to provision worker VM for Kubernetes cluster : %s", kubernetesCluster.getName())); } nodes.add(vm); - logger.info("Provisioned node VM : {} in to the Kubernetes cluster : {}", vm.getDisplayName(), kubernetesCluster.getName()); + logger.info("Provisioned node VM: {} in to the Kubernetes cluster: {}", vm, kubernetesCluster); } finally { CallContext.unregister(); } @@ -420,7 +420,7 @@ protected UserVm createKubernetesNode(String joinIp) throws ManagementServerExce null, addrs, null, null, null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE, null); } if (logger.isInfoEnabled()) { - logger.info(String.format("Created node VM : %s, %s in the Kubernetes cluster : %s", hostName, nodeVm.getUuid(), kubernetesCluster.getName())); + logger.info("Created node VM: {}, {} in the Kubernetes cluster: {}", hostName, nodeVm, kubernetesCluster); } return nodeVm; } @@ -469,7 +469,7 @@ protected void provisionPublicIpPortForwardingRule(IpAddress publicIp, Network n }); rulesService.applyPortForwardingRules(publicIp.getId(), account); if (logger.isInfoEnabled()) { - logger.info(String.format("Provisioned SSH port forwarding rule: %s from port %d to %d on 
%s to the VM IP : %s in Kubernetes cluster : %s", pfRule.getUuid(), sourcePort, destPort, publicIp.getAddress().addr(), vmIp.toString(), kubernetesCluster.getName())); + logger.info("Provisioned SSH port forwarding rule: {} from port {} to {} on {} to the VM IP: {} in Kubernetes cluster: {}", pfRule, sourcePort, destPort, publicIp.getAddress().addr(), vmIp, kubernetesCluster); } } @@ -637,7 +637,7 @@ protected void createFirewallRules(IpAddress publicIp, List clusterVMIds, int endPort = CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMIds.size() - 1; provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort); if (logger.isInfoEnabled()) { - logger.info(String.format("Provisioned firewall rule to open up port %d to %d on %s for Kubernetes cluster : %s", CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort, publicIp.getAddress().addr(), kubernetesCluster.getName())); + logger.info("Provisioned firewall rule to open up port {} to {} on {} for Kubernetes cluster: {}", CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort, publicIp.getAddress().addr(), kubernetesCluster); } } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) { throw new ManagementServerException(String.format("Failed to provision firewall rules for SSH access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); @@ -652,8 +652,7 @@ protected void createFirewallRules(IpAddress publicIp, List clusterVMIds, try { provisionFirewallRules(publicIp, owner, CLUSTER_API_PORT, CLUSTER_API_PORT); if (logger.isInfoEnabled()) { - logger.info(String.format("Provisioned firewall rule to open up port %d on %s for Kubernetes cluster %s", - CLUSTER_API_PORT, publicIp.getAddress().addr(), kubernetesCluster.getName())); + logger.info("Provisioned firewall rule to open up port {} on {} for Kubernetes cluster {}", CLUSTER_API_PORT, publicIp.getAddress().addr(), kubernetesCluster); } } catch (NoSuchFieldException | 
IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) { throw new ManagementServerException(String.format("Failed to provision firewall rules for API access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); @@ -703,8 +702,7 @@ protected void createVpcTierAclRules(Network network) throws ManagementServerExc try { provisionVpcTierAllowPortACLRule(network, CLUSTER_API_PORT, CLUSTER_API_PORT); if (logger.isInfoEnabled()) { - logger.info(String.format("Provisioned ACL rule to open up port %d on %s for Kubernetes cluster %s", - CLUSTER_API_PORT, publicIpAddress, kubernetesCluster.getName())); + logger.info("Provisioned ACL rule to open up port {} on {} for Kubernetes cluster {}", CLUSTER_API_PORT, publicIpAddress, kubernetesCluster); } } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | InvalidParameterValueException | PermissionDeniedException e) { throw new ManagementServerException(String.format("Failed to provision firewall rules for API access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); @@ -715,8 +713,7 @@ protected void createVpcTierAclRules(Network network) throws ManagementServerExc try { provisionVpcTierAllowPortACLRule(network, DEFAULT_SSH_PORT, DEFAULT_SSH_PORT); if (logger.isInfoEnabled()) { - logger.info(String.format("Provisioned ACL rule to open up port %d on %s for Kubernetes cluster %s", - DEFAULT_SSH_PORT, publicIpAddress, kubernetesCluster.getName())); + logger.info("Provisioned ACL rule to open up port {} on {} for Kubernetes cluster {}", DEFAULT_SSH_PORT, publicIpAddress, kubernetesCluster); } } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | InvalidParameterValueException | PermissionDeniedException e) { throw new ManagementServerException(String.format("Failed to provision firewall rules for API access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); @@ -733,8 +730,7 @@ protected 
void removeVpcTierAclRules(Network network) throws ManagementServerExc try { removeVpcTierAllowPortACLRule(network, CLUSTER_API_PORT, CLUSTER_API_PORT); if (logger.isInfoEnabled()) { - logger.info(String.format("Removed network ACL rule to open up port %d on %s for Kubernetes cluster %s", - CLUSTER_API_PORT, publicIpAddress, kubernetesCluster.getName())); + logger.info("Removed network ACL rule to open up port {} on {} for Kubernetes cluster {}", CLUSTER_API_PORT, publicIpAddress, kubernetesCluster); } } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException e) { throw new ManagementServerException(String.format("Failed to remove network ACL rule for API access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); @@ -743,8 +739,7 @@ protected void removeVpcTierAclRules(Network network) throws ManagementServerExc try { removeVpcTierAllowPortACLRule(network, DEFAULT_SSH_PORT, DEFAULT_SSH_PORT); if (logger.isInfoEnabled()) { - logger.info(String.format("Removed network ACL rule to open up port %d on %s for Kubernetes cluster %s", - DEFAULT_SSH_PORT, publicIpAddress, kubernetesCluster.getName())); + logger.info("Removed network ACL rule to open up port {} on {} for Kubernetes cluster {}", DEFAULT_SSH_PORT, publicIpAddress, kubernetesCluster); } } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException e) { throw new ManagementServerException(String.format("Failed to remove network ACL rules for SSH access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index 028ea1c79925..a2384a2e0feb 100644 --- 
a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -232,7 +232,7 @@ private UserVm createKubernetesControlNode(final Network network, String serverI requestedIps, addrs, null, null, null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE, null); } if (logger.isInfoEnabled()) { - logger.info(String.format("Created control VM ID: %s, %s in the Kubernetes cluster : %s", controlVm.getUuid(), hostName, kubernetesCluster.getName())); + logger.info("Created control VM: {}, {} in the Kubernetes cluster: {}", controlVm, hostName, kubernetesCluster); } return controlVm; } @@ -310,7 +310,7 @@ private UserVm createKubernetesAdditionalControlNode(final String joinIp, final } if (logger.isInfoEnabled()) { - logger.info(String.format("Created control VM ID : %s, %s in the Kubernetes cluster : %s", additionalControlVm.getUuid(), hostName, kubernetesCluster.getName())); + logger.info("Created control VM: {}, {} in the Kubernetes cluster: {}", additionalControlVm, hostName, kubernetesCluster); } return additionalControlVm; } @@ -329,7 +329,7 @@ private UserVm provisionKubernetesClusterControlVm(final Network network, final throw new ManagementServerException(String.format("Failed to provision control VM for Kubernetes cluster : %s" , kubernetesCluster.getName())); } if (logger.isInfoEnabled()) { - logger.info(String.format("Provisioned the control VM : %s in to the Kubernetes cluster : %s", k8sControlVM.getDisplayName(), kubernetesCluster.getName())); + logger.info("Provisioned the control VM: {} in to the Kubernetes cluster: {}", k8sControlVM, kubernetesCluster); } return k8sControlVM; } @@ -352,7 +352,7 @@ private List provisionKubernetesClusterAdditionalControlVms(final String } additionalControlVms.add(vm); if (logger.isInfoEnabled()) { - 
logger.info(String.format("Provisioned additional control VM : %s in to the Kubernetes cluster : %s", vm.getDisplayName(), kubernetesCluster.getName())); + logger.info("Provisioned additional control VM: {} in to the Kubernetes cluster: {}", vm, kubernetesCluster); } } } @@ -371,10 +371,10 @@ private Network startKubernetesClusterNetwork(final DeployDestination destinatio try { networkMgr.startNetwork(network.getId(), destination, context); if (logger.isInfoEnabled()) { - logger.info(String.format("Network : %s is started for the Kubernetes cluster : %s", network.getName(), kubernetesCluster.getName())); + logger.info("Network: {} is started for the Kubernetes cluster: {}", network, kubernetesCluster); } } catch (ConcurrentOperationException | ResourceUnavailableException |InsufficientCapacityException e) { - String msg = String.format("Failed to start Kubernetes cluster : %s as unable to start associated network : %s" , kubernetesCluster.getName(), network.getName()); + String msg = String.format("Failed to start Kubernetes cluster: %s as unable to start associated network: %s" , kubernetesCluster, network); logger.error(msg, e); stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed); throw new ManagementServerException(msg, e); @@ -385,7 +385,7 @@ private Network startKubernetesClusterNetwork(final DeployDestination destinatio protected void setupKubernetesClusterNetworkRules(Network network, List clusterVMs) throws ManagementServerException { if (manager.isDirectAccess(network)) { if (logger.isDebugEnabled()) { - logger.debug(String.format("Network : %s for Kubernetes cluster : %s is not an isolated network or ROUTED network, therefore, no need for network rules", network.getName(), kubernetesCluster.getName())); + logger.debug("Network: {} for Kubernetes cluster: {} is not an isolated network or ROUTED network, therefore, no need for network rules", network, kubernetesCluster); } return; } @@ -416,7 +416,7 @@ private void 
startKubernetesClusterVMs() { resizeNodeVolume(vm); startKubernetesVM(vm); } catch (ManagementServerException ex) { - logger.warn(String.format("Failed to start VM : %s in Kubernetes cluster : %s due to ", vm.getDisplayName(), kubernetesCluster.getName()) + ex); + logger.warn("Failed to start VM: {} in Kubernetes cluster: {} due to {}", vm, kubernetesCluster, ex); // don't bail out here. proceed further to stop the reset of the VM's } } @@ -471,7 +471,7 @@ private void updateKubernetesClusterEntryEndpoint() { public boolean startKubernetesClusterOnCreate() { init(); if (logger.isInfoEnabled()) { - logger.info(String.format("Starting Kubernetes cluster : %s", kubernetesCluster.getName())); + logger.info("Starting Kubernetes cluster: {}", kubernetesCluster); } final long startTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterStartTimeout.value() * 1000; stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.StartRequested); @@ -530,7 +530,7 @@ public boolean startKubernetesClusterOnCreate() { logTransitStateAndThrow(Level.ERROR, String.format("Provisioning node VM failed in the Kubernetes cluster : %s", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); } if (logger.isInfoEnabled()) { - logger.info(String.format("Kubernetes cluster : %s VMs successfully provisioned", kubernetesCluster.getName())); + logger.info("Kubernetes cluster: {} VMs successfully provisioned", kubernetesCluster); } try { setupKubernetesClusterNetworkRules(network, clusterVMs); @@ -577,7 +577,7 @@ public boolean startKubernetesClusterOnCreate() { public boolean startStoppedKubernetesCluster() throws CloudRuntimeException { init(); if (logger.isInfoEnabled()) { - logger.info(String.format("Starting Kubernetes cluster : %s", kubernetesCluster.getName())); + logger.info("Starting Kubernetes cluster: {}", kubernetesCluster); } final long startTimeoutTime = System.currentTimeMillis() + 
KubernetesClusterService.KubernetesClusterStartTimeout.value() * 1000; stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.StartRequested); @@ -604,7 +604,7 @@ public boolean startStoppedKubernetesCluster() throws CloudRuntimeException { } stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); if (logger.isInfoEnabled()) { - logger.info(String.format("Kubernetes cluster : %s successfully started", kubernetesCluster.getName())); + logger.info("Kubernetes cluster: {} successfully started", kubernetesCluster); } return true; } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStopWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStopWorker.java index 60802d12e729..59d74751dff9 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStopWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStopWorker.java @@ -38,7 +38,7 @@ public KubernetesClusterStopWorker(final KubernetesCluster kubernetesCluster, fi public boolean stop() throws CloudRuntimeException { init(); if (logger.isInfoEnabled()) { - logger.info(String.format("Stopping Kubernetes cluster : %s", kubernetesCluster.getName())); + logger.info("Stopping Kubernetes cluster: {}", kubernetesCluster); } stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.StopRequested); List clusterVMs = getKubernetesClusterVMs(); @@ -51,8 +51,7 @@ public boolean stop() throws CloudRuntimeException { try { userVmService.stopVirtualMachine(vm.getId(), false); } catch (ConcurrentOperationException ex) { - logger.warn(String.format("Failed to stop VM : %s in Kubernetes cluster : %s", - vm.getDisplayName(), kubernetesCluster.getName()), ex); + logger.warn("Failed to stop VM: {} in 
Kubernetes cluster: {}", vm, kubernetesCluster, ex); } finally { CallContext.unregister(); } diff --git a/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/api/command/LinkAccountToLdapCmdTest.java b/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/api/command/LinkAccountToLdapCmdTest.java index 62a3a809b16f..adf0f98f2943 100644 --- a/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/api/command/LinkAccountToLdapCmdTest.java +++ b/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/api/command/LinkAccountToLdapCmdTest.java @@ -19,6 +19,7 @@ import com.cloud.user.Account; import com.cloud.user.AccountService; +import com.cloud.user.DomainService; import com.cloud.user.User; import com.cloud.user.UserAccountVO; import org.apache.cloudstack.acl.RoleType; @@ -45,6 +46,8 @@ public class LinkAccountToLdapCmdTest implements LdapConfigurationChanger { LdapManager ldapManager; @Mock AccountService accountService; + @Mock + DomainService domainService; LinkAccountToLdapCmd linkAccountToLdapCmd; @@ -53,6 +56,7 @@ public void setUp() throws NoSuchFieldException, IllegalAccessException { linkAccountToLdapCmd = new LinkAccountToLdapCmd(); setHiddenField(linkAccountToLdapCmd, "_ldapManager", ldapManager); setHiddenField(linkAccountToLdapCmd, "_accountService", accountService); + setHiddenField(linkAccountToLdapCmd, "_domainService", domainService); } @Test diff --git a/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/api/command/LinkDomainToLdapCmdTest.java b/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/api/command/LinkDomainToLdapCmdTest.java index 67d0e7705226..080347fefd32 100644 --- a/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/api/command/LinkDomainToLdapCmdTest.java +++ b/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/api/command/LinkDomainToLdapCmdTest.java @@ -18,6 +18,7 @@ import com.cloud.user.Account; 
import com.cloud.user.AccountService; +import com.cloud.user.DomainService; import com.cloud.user.User; import com.cloud.user.UserAccountVO; import org.apache.cloudstack.acl.RoleType; @@ -44,6 +45,8 @@ public class LinkDomainToLdapCmdTest implements LdapConfigurationChanger LdapManager ldapManager; @Mock AccountService accountService; + @Mock + DomainService domainService; LinkDomainToLdapCmd linkDomainToLdapCmd; @@ -52,6 +55,7 @@ public void setUp() throws NoSuchFieldException, IllegalAccessException { linkDomainToLdapCmd = new LinkDomainToLdapCmd(); setHiddenField(linkDomainToLdapCmd, "_ldapManager", ldapManager); setHiddenField(linkDomainToLdapCmd, "_accountService", accountService); + setHiddenField(linkDomainToLdapCmd, "_domainService", domainService); } @After diff --git a/server/src/main/java/com/cloud/alert/AlertManagerImpl.java b/server/src/main/java/com/cloud/alert/AlertManagerImpl.java index 4c4f08f12bd7..a9e66c6aecea 100644 --- a/server/src/main/java/com/cloud/alert/AlertManagerImpl.java +++ b/server/src/main/java/com/cloud/alert/AlertManagerImpl.java @@ -33,6 +33,9 @@ import javax.mail.MessagingException; import javax.naming.ConfigurationException; +import com.cloud.dc.DataCenter; +import com.cloud.dc.Pod; +import com.cloud.org.Cluster; import org.apache.cloudstack.framework.config.ConfigDepot; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; @@ -672,7 +675,7 @@ private void generateEmailAlert(DataCenterVO dc, HostPodVO pod, ClusterVO cluste logger.debug(msgSubject); logger.debug(msgContent); } - sendAlert(alertType, dc.getId(), podId, clusterId, msgSubject, msgContent); + sendAlert(alertType, dc, pod, cluster, msgSubject, msgContent); } catch (Exception ex) { logger.error("Exception in CapacityChecker", ex); } @@ -723,15 +726,25 @@ public void clearAlert(short alertType, long dataCenterId, Long podId) { public void sendAlert(AlertType alertType, long dataCenterId, Long podId, Long 
clusterId, String subject, String content) throws MessagingException, UnsupportedEncodingException { - logger.warn(String.format("alertType=[%s] dataCenterId=[%s] podId=[%s] clusterId=[%s] message=[%s].", alertType, dataCenterId, podId, clusterId, subject)); + DataCenterVO zone = _dcDao.findById(dataCenterId); + HostPodVO pod = podId == null ? null : _podDao.findById(podId); + ClusterVO cluster = clusterId == null ? null : _clusterDao.findById(clusterId); + sendAlert(alertType, zone, pod, cluster, subject, content); + } + + public void sendAlert(AlertType alertType, DataCenter dataCenter, Pod pod, Cluster cluster, String subject, String content) + throws MessagingException, UnsupportedEncodingException { + logger.warn(String.format("alertType=[%s] dataCenter=[%s] pod=[%s] cluster=[%s] message=[%s].", alertType, dataCenter, pod, cluster, subject)); AlertVO alert = null; + Long clusterId = cluster == null ? null : cluster.getId(); + Long podId = pod == null ? null : pod.getId(); if ((alertType != AlertManager.AlertType.ALERT_TYPE_HOST) && (alertType != AlertManager.AlertType.ALERT_TYPE_USERVM) && (alertType != AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER) && (alertType != AlertManager.AlertType.ALERT_TYPE_CONSOLE_PROXY) && (alertType != AlertManager.AlertType.ALERT_TYPE_SSVM) && (alertType != AlertManager.AlertType.ALERT_TYPE_STORAGE_MISC) && (alertType != AlertManager.AlertType.ALERT_TYPE_MANAGEMENT_NODE) && (alertType != AlertManager.AlertType.ALERT_TYPE_RESOURCE_LIMIT_EXCEEDED) && (alertType != AlertManager.AlertType.ALERT_TYPE_UPLOAD_FAILED) && (alertType != AlertManager.AlertType.ALERT_TYPE_OOBM_AUTH_ERROR) && (alertType != AlertManager.AlertType.ALERT_TYPE_HA_ACTION) && (alertType != AlertManager.AlertType.ALERT_TYPE_CA_CERT)) { - alert = _alertDao.getLastAlert(alertType.getType(), dataCenterId, podId, clusterId); + alert = _alertDao.getLastAlert(alertType.getType(), dataCenter.getId(), podId, clusterId); } if (alert == null) { @@ -741,7 +754,7 @@ public 
void sendAlert(AlertType alertType, long dataCenterId, Long podId, Long c newAlert.setContent(content); newAlert.setClusterId(clusterId); newAlert.setPodId(podId); - newAlert.setDataCenterId(dataCenterId); + newAlert.setDataCenterId(dataCenter.getId()); newAlert.setSentCount(1); newAlert.setLastSent(new Date()); newAlert.setName(alertType.getName()); diff --git a/server/src/main/java/com/cloud/network/Ipv6ServiceImpl.java b/server/src/main/java/com/cloud/network/Ipv6ServiceImpl.java index 1894fbcfcb0b..464f8c90ebb6 100644 --- a/server/src/main/java/com/cloud/network/Ipv6ServiceImpl.java +++ b/server/src/main/java/com/cloud/network/Ipv6ServiceImpl.java @@ -56,7 +56,6 @@ import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; -import com.cloud.api.ApiDBUtils; import com.cloud.configuration.Resource; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenterGuestIpv6Prefix; diff --git a/server/src/main/java/com/cloud/network/NetworkMigrationManagerImpl.java b/server/src/main/java/com/cloud/network/NetworkMigrationManagerImpl.java index 2e92acd71558..91e4fddb69ce 100644 --- a/server/src/main/java/com/cloud/network/NetworkMigrationManagerImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkMigrationManagerImpl.java @@ -617,7 +617,7 @@ private void reapplyPublicIps(Network networkInOldPhysicalNetwork, Network netwo } } - _rulesMgr.applyStaticNatsForNetwork(networkInNewPhysicalNet.getId(), false, networkAccount); + _rulesMgr.applyStaticNatsForNetwork(networkInNewPhysicalNet, false, networkAccount); } private void copyNicDetails(long originalNicId, long dstNicId) { diff --git a/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java b/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java index 5d4ce1052cfc..e8a523ec3ac4 100644 --- a/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java +++ 
b/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java @@ -1438,7 +1438,7 @@ private boolean removeFromLoadBalancerInternal(long loadBalancerId, List i lbvm.setRevoke(true); _lb2VmMapDao.persist(lbvm); } - logger.debug("Set load balancer rule for revoke: rule {}, vmId {}", loadBalancer, instanceId); + logger.debug("Set load balancer rule for revoke: rule {}, vm {}", loadBalancer::toString, () -> _vmDao.findById(instanceId)); } else { for (String vmIp: lbVmIps) { @@ -1448,7 +1448,7 @@ private boolean removeFromLoadBalancerInternal(long loadBalancerId, List i } map.setRevoke(true); _lb2VmMapDao.persist(map); - logger.debug("Set load balancer rule for revoke: rule {}, vmId {}, vmip {}", loadBalancer, instanceId, vmIp); + logger.debug("Set load balancer rule for revoke: rule {}, vmId {}, vmip {}", loadBalancer::toString, () -> _vmDao.findById(instanceId), vmIp::toString); } } } @@ -1526,7 +1526,7 @@ public boolean removeVmFromLoadBalancers(long instanceId) { map.setRevoke(true); _lb2VmMapDao.persist(map); - logger.debug("Set load balancer rule for revoke: rule id " + map.getLoadBalancerId() + ", vmId " + instanceId); + logger.debug("Set load balancer rule for revoke: rule {}, vm {}", () -> _lbDao.findById(map.getLoadBalancerId()), () -> _vmDao.findById(instanceId)); } // Reapply all lbs that had the vm assigned @@ -1600,7 +1600,7 @@ public List doInTransaction(TransactionStatus status) { for (LoadBalancerVMMapVO map : maps) { map.setRevoke(true); _lb2VmMapDao.persist(map); - logger.debug("Set load balancer rule for revoke: rule {}, vmId {}", lb, map.getInstanceId()); + logger.debug("Set load balancer rule for revoke: rule {}, vmId {}", lb::toString, () -> _vmDao.findById(map.getInstanceId())); } } diff --git a/server/src/main/java/com/cloud/network/rules/RulesManagerImpl.java b/server/src/main/java/com/cloud/network/rules/RulesManagerImpl.java index 575694218aab..bfcaca72b317 100644 --- 
a/server/src/main/java/com/cloud/network/rules/RulesManagerImpl.java +++ b/server/src/main/java/com/cloud/network/rules/RulesManagerImpl.java @@ -975,7 +975,7 @@ public boolean applyStaticNatRulesForNetwork(long networkId, boolean continueOnE List staticNatRules = new ArrayList(); if (rules.size() == 0) { - logger.debug("There are no static nat rules to apply for network id=" + networkId); + logger.debug("There are no static nat rules to apply for network {}", _networkModel.getNetwork(networkId)); return true; } @@ -1000,10 +1000,10 @@ public boolean applyStaticNatRulesForNetwork(long networkId, boolean continueOnE } @Override - public boolean applyStaticNatsForNetwork(long networkId, boolean continueOnError, Account caller) { - List ips = _ipAddressDao.listStaticNatPublicIps(networkId); + public boolean applyStaticNatsForNetwork(Network network, boolean continueOnError, Account caller) { + List ips = _ipAddressDao.listStaticNatPublicIps(network.getId()); if (ips.isEmpty()) { - logger.debug("There are no static nat to apply for network id=" + networkId); + logger.debug("There are no static nat to apply for network {}", network); return true; } @@ -1015,7 +1015,7 @@ public boolean applyStaticNatsForNetwork(long networkId, boolean continueOnError for (IPAddressVO ip : ips) { // Get nic IP4 address //String dstIp = _networkModel.getIpInNetwork(ip.getAssociatedWithVmId(), networkId); - StaticNatImpl staticNat = new StaticNatImpl(ip.getAllocatedToAccountId(), ip.getAllocatedInDomainId(), networkId, ip.getId(), ip.getVmIp(), false); + StaticNatImpl staticNat = new StaticNatImpl(ip.getAllocatedToAccountId(), ip.getAllocatedInDomainId(), network.getId(), ip.getId(), ip.getVmIp(), false); staticNats.add(staticNat); } @@ -1399,8 +1399,8 @@ protected boolean applyStaticNatForIp(long sourceIpId, boolean continueOnError, } @Override - public boolean applyStaticNatForNetwork(long networkId, boolean continueOnError, Account caller, boolean forRevoke) { - List staticNatIps = 
_ipAddressDao.listStaticNatPublicIps(networkId); + public boolean applyStaticNatForNetwork(Network network, boolean continueOnError, Account caller, boolean forRevoke) { + List staticNatIps = _ipAddressDao.listStaticNatPublicIps(network.getId()); List staticNats = new ArrayList(); for (IpAddress staticNatIp : staticNatIps) { @@ -1409,7 +1409,7 @@ public boolean applyStaticNatForNetwork(long networkId, boolean continueOnError, if (staticNats != null && !staticNats.isEmpty()) { if (forRevoke) { - logger.debug("Found " + staticNats.size() + " static nats to disable for network id " + networkId); + logger.debug("Found {} static nats to disable for network {}", staticNats.size(), network); } try { if (!_ipAddrMgr.applyStaticNats(staticNats, continueOnError, forRevoke)) { @@ -1420,7 +1420,7 @@ public boolean applyStaticNatForNetwork(long networkId, boolean continueOnError, return false; } } else { - logger.debug("Found 0 static nat rules to apply for network id " + networkId); + logger.debug("Found 0 static nat rules to apply for network id {}", network); } return true; diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 3657f675fb97..c1708262a9d9 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -4581,7 +4581,7 @@ private VolumeVO sendAttachVolumeCommand(UserVmVO vm, VolumeVO volumeToAttach, L controllerInfo.put(VmDetailConstants.ROOT_DISK_CONTROLLER, vm.getDetail(VmDetailConstants.ROOT_DISK_CONTROLLER)); controllerInfo.put(VmDetailConstants.DATA_DISK_CONTROLLER, vm.getDetail(VmDetailConstants.DATA_DISK_CONTROLLER)); cmd.setControllerInfo(controllerInfo); - logger.debug(String.format("Attach volume %s on VM %s has controller info: %s", volumeToAttach, vm, controllerInfo)); + logger.debug("Attach volume {} on VM {} has controller info: {}", volumeToAttach, vm, 
controllerInfo); try { answer = (AttachAnswer)_agentMgr.send(hostId, cmd); diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index aab90c3ecb82..791fdef7e912 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -2623,7 +2623,8 @@ private void updateVmStateForFailedVmCreation(Long vmId, Long hostId) { if (vm != null) { if (vm.getState().equals(State.Stopped)) { - logger.debug("Destroying vm " + vm + " as it failed to create on Host with Id:" + hostId); + HostVO host = _hostDao.findById(hostId); + logger.debug("Destroying vm {} as it failed to create on Host: {} with id {}", vm, host, hostId); try { _itMgr.stateTransitTo(vm, VirtualMachine.Event.OperationFailedToError, null); } catch (NoTransitionException e1) { @@ -2637,7 +2638,7 @@ private void updateVmStateForFailedVmCreation(Long vmId, Long hostId) { volumeMgr.destroyVolume(volume); } } - String msg = String.format("Failed to deploy Vm %s, on Host with Id: %d", vm, hostId); + String msg = String.format("Failed to deploy Vm %s, on Host %s with Id: %d", vm, host, hostId); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); // Get serviceOffering and template for Virtual Machine @@ -8566,7 +8567,7 @@ private boolean checkStatusOfVolumeSnapshots(VirtualMachine vm, Volume.Type type logger.debug("Found {} no. 
of volumes of type {} for vm with VM ID {}", listVolumes.size(), type, vm); for (VolumeVO volume : listVolumes) { Long volumeId = volume.getId(); - logger.debug("Checking status of snapshots for Volume with Volume: {}", volume); + logger.debug("Checking status of snapshots for Volume: {}", volume); List ongoingSnapshots = _snapshotDao.listByStatus(volumeId, Snapshot.State.Creating, Snapshot.State.CreatedOnPrimary, Snapshot.State.BackingUp); int ongoingSnapshotsCount = ongoingSnapshots.size(); logger.debug("The count of ongoing Snapshots for VM {} and disk type {} is {}", vm, type, ongoingSnapshotsCount); diff --git a/server/src/main/java/org/apache/cloudstack/ha/HAManagerImpl.java b/server/src/main/java/org/apache/cloudstack/ha/HAManagerImpl.java index aa9727cf33fb..a016be5c6e32 100644 --- a/server/src/main/java/org/apache/cloudstack/ha/HAManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/ha/HAManagerImpl.java @@ -153,8 +153,8 @@ public boolean transitionHAState(final HAConfig.Event event, final HAConfig haCo final HAConfig.HAState nextState = HAConfig.HAState.getStateMachine().getNextState(currentHAState, event); boolean result = HAConfig.HAState.getStateMachine().transitTo(haConfig, event, null, haConfigDao); if (result) { - final String message = String.format("Transitioned host HA state from:%s to:%s due to event:%s for the host id:%d", - currentHAState, nextState, event, haConfig.getResourceId()); + final String message = String.format("Transitioned host HA state from: %s to: %s due to event:%s for the host %s with id: %d", + currentHAState, nextState, event, hostDao.findByIdIncludingRemoved(haConfig.getResourceId()), haConfig.getResourceId()); logger.debug(message); if (nextState == HAConfig.HAState.Recovering || nextState == HAConfig.HAState.Fencing || nextState == HAConfig.HAState.Fenced) { @@ -164,7 +164,8 @@ public boolean transitionHAState(final HAConfig.Event event, final HAConfig haCo } return result; } catch (NoTransitionException e) { 
- logger.warn(String.format("Unable to find next HA state for current HA state=[%s] for event=[%s] for host=[%s].", currentHAState, event, haConfig.getResourceId()), e); + logger.warn("Unable to find next HA state for current HA state=[{}] for event=[{}] for host {} with id {}.", + currentHAState, event, hostDao.findByIdIncludingRemoved(haConfig.getResourceId()), haConfig.getResourceId(), e); } return false; } diff --git a/server/src/test/java/com/cloud/alert/AlertManagerImplTest.java b/server/src/test/java/com/cloud/alert/AlertManagerImplTest.java index ba0d3cab002c..e04c5e181e74 100644 --- a/server/src/test/java/com/cloud/alert/AlertManagerImplTest.java +++ b/server/src/test/java/com/cloud/alert/AlertManagerImplTest.java @@ -17,6 +17,12 @@ package com.cloud.alert; import com.cloud.alert.dao.AlertDao; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.HostPodVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.HostPodDao; import org.apache.cloudstack.utils.mailing.SMTPMailSender; import org.apache.logging.log4j.Logger; import org.junit.Assert; @@ -41,6 +47,15 @@ public class AlertManagerImplTest { @Mock AlertDao alertDaoMock; + @Mock + private DataCenterDao _dcDao; + + @Mock + private HostPodDao _podDao; + + @Mock + private ClusterDao _clusterDao; + @Mock AlertVO alertVOMock; @@ -52,6 +67,16 @@ public class AlertManagerImplTest { private void sendMessage (){ try { + DataCenterVO zone = Mockito.mock(DataCenterVO.class); + Mockito.when(zone.getId()).thenReturn(0L); + Mockito.when(_dcDao.findById(0L)).thenReturn(zone); + HostPodVO pod = Mockito.mock(HostPodVO.class); + Mockito.when(pod.getId()).thenReturn(1L); + Mockito.when(_podDao.findById(1L)).thenReturn(pod); + ClusterVO cluster = Mockito.mock(ClusterVO.class); + Mockito.when(cluster.getId()).thenReturn(1L); + Mockito.when(_clusterDao.findById(1L)).thenReturn(cluster); + 
alertManagerImplMock.sendAlert(AlertManager.AlertType.ALERT_TYPE_CPU, 0, 1l, 1l, "", ""); } catch (UnsupportedEncodingException | MessagingException e) { Assert.fail(); diff --git a/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java b/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java index a37559e73e3e..aea29d59356e 100644 --- a/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java +++ b/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java @@ -1175,7 +1175,7 @@ public void checkAndSetSourceNatIp() { when(networkVO.getId()).thenReturn(networkId); when(networkVO.getGuestType()).thenReturn(Network.GuestType.Isolated); try { - when(ipAddressManagerMock.allocateIp(any(), anyBoolean(), any(), anyLong(), any(), any(), eq(srcNatIp))).thenReturn(ipAddress); + when(ipAddressManagerMock.allocateIp(any(), anyBoolean(), any(), any(), any(), any(), eq(srcNatIp))).thenReturn(ipAddress); service.checkAndSetRouterSourceNatIp(account, createNetworkCmd, networkVO); } catch (InsufficientAddressCapacityException | ResourceAllocationException e) { Assert.fail(e.getMessage()); diff --git a/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java b/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java index 7036cef33ec0..4c5531277fec 100644 --- a/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java +++ b/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java @@ -101,6 +101,7 @@ import com.cloud.vm.UserVmVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineManager; +import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.VmStats; import com.cloud.vm.dao.DomainRouterDao; import com.cloud.vm.dao.UserVmDao; @@ -1267,7 +1268,6 @@ public void testCreateNewVM1() throws ResourceUnavailableException, Insufficient Mockito.doReturn(networkMock).when(autoScaleManagerImplSpy).getNetwork(loadBalancerId); 
when(networkMock.getId()).thenReturn(networkId); - when(userVmMock.getId()).thenReturn(virtualMachineId); when(zoneMock.getNetworkType()).thenReturn(DataCenter.NetworkType.Basic); when(userVmService.createBasicSecurityGroupVirtualMachine(any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), eq(userData), eq(userDataId), eq(userDataDetails.toString()), any(), any(), any(), eq(true), any(), any(), any(), @@ -1315,7 +1315,6 @@ public void testCreateNewVM2() throws ResourceUnavailableException, Insufficient Mockito.doReturn(networkMock).when(autoScaleManagerImplSpy).getNetwork(loadBalancerId); when(networkMock.getId()).thenReturn(networkId); - when(userVmMock.getId()).thenReturn(virtualMachineId); when(zoneMock.getNetworkType()).thenReturn(DataCenter.NetworkType.Advanced); when(userVmService.createAdvancedSecurityGroupVirtualMachine(any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), eq(userData), eq(userDataId), eq(userDataDetails.toString()), any(), any(), any(), any(), any(), any(), @@ -1365,7 +1364,6 @@ public void testCreateNewVM3() throws ResourceUnavailableException, Insufficient Mockito.doReturn(networkMock).when(autoScaleManagerImplSpy).getNetwork(loadBalancerId); when(networkMock.getId()).thenReturn(networkId); - when(userVmMock.getId()).thenReturn(virtualMachineId); when(zoneMock.getNetworkType()).thenReturn(DataCenter.NetworkType.Advanced); when(userVmService.createAdvancedVirtualMachine(any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), eq(userData), eq(userDataId), eq(userDataDetails.toString()), any(), any(), any(), eq(true), any(), any(), any(), @@ -1513,11 +1511,13 @@ public void testDoScaleUp() throws ResourceUnavailableException, InsufficientCap when(loadBalancerVMMapMock.getInstanceId()).thenReturn(virtualMachineId + 1); when(loadBalancingRulesService.assignToLoadBalancer(anyLong(), any(), any(), eq(true))).thenReturn(true); + Mockito.doReturn(new 
Pair>(userVmMock, null)).when(userVmMgr).startVirtualMachine(virtualMachineId, null, new HashMap<>(), null); autoScaleManagerImplSpy.doScaleUp(vmGroupId, 1); Mockito.verify(autoScaleManagerImplSpy).createNewVM(asVmGroupMock); Mockito.verify(loadBalancingRulesService).assignToLoadBalancer(anyLong(), any(), any(), eq(true)); + Mockito.verify(userVmMgr).startVirtualMachine(virtualMachineId, null, new HashMap<>(), null); } } diff --git a/server/src/test/java/com/cloud/template/HypervisorTemplateAdapterTest.java b/server/src/test/java/com/cloud/template/HypervisorTemplateAdapterTest.java index 5307beb4aba8..2a6d7af434a3 100644 --- a/server/src/test/java/com/cloud/template/HypervisorTemplateAdapterTest.java +++ b/server/src/test/java/com/cloud/template/HypervisorTemplateAdapterTest.java @@ -451,12 +451,11 @@ public void isZoneAndImageStoreAvailableTestZoneIsNullShouldReturnFalse() { DataCenterVO dataCenterVOMock = null; Mockito.when(_dcDao.findById(Mockito.anyLong())).thenReturn(dataCenterVOMock); - Mockito.when(dataStoreMock.getId()).thenReturn(2L); boolean result = _adapter.isZoneAndImageStoreAvailable(dataStoreMock, zoneId, zoneSet, isTemplatePrivate); - Mockito.verify(loggerMock, Mockito.times(1)).warn(String.format("Unable to find zone by id [%s], so skip downloading template to its image store [%s].", - zoneId, dataStoreMock.getId())); + Mockito.verify(loggerMock, Mockito.times(1)).warn("Unable to find zone by id [{}], so skip downloading template to its image store [{}].", + zoneId, dataStoreMock); Assert.assertFalse(result); } @@ -470,11 +469,10 @@ public void isZoneAndImageStoreAvailableTestZoneIsDisabledShouldReturnFalse() { Mockito.when(_dcDao.findById(Mockito.anyLong())).thenReturn(dataCenterVOMock); Mockito.when(dataCenterVOMock.getAllocationState()).thenReturn(Grouping.AllocationState.Disabled); - Mockito.when(dataStoreMock.getId()).thenReturn(2L); boolean result = _adapter.isZoneAndImageStoreAvailable(dataStoreMock, zoneId, zoneSet, isTemplatePrivate); - 
Mockito.verify(loggerMock, Mockito.times(1)).info(String.format("Zone [%s] is disabled. Skip downloading template to its image store [%s].", zoneId, dataStoreMock.getId())); + Mockito.verify(loggerMock, Mockito.times(1)).info("Zone [{}] is disabled. Skip downloading template to its image store [{}].", dataCenterVOMock, dataStoreMock); Assert.assertFalse(result); } @@ -488,13 +486,12 @@ public void isZoneAndImageStoreAvailableTestImageStoreDoesNotHaveEnoughCapacityS Mockito.when(_dcDao.findById(Mockito.anyLong())).thenReturn(dataCenterVOMock); Mockito.when(dataCenterVOMock.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled); - Mockito.when(dataStoreMock.getId()).thenReturn(2L); Mockito.when(statsCollectorMock.imageStoreHasEnoughCapacity(any(DataStore.class))).thenReturn(false); boolean result = _adapter.isZoneAndImageStoreAvailable(dataStoreMock, zoneId, zoneSet, isTemplatePrivate); - Mockito.verify(loggerMock, times(1)).info(String.format("Image store doesn't have enough capacity. Skip downloading template to this image store [%s].", - dataStoreMock.getId())); + Mockito.verify(loggerMock, times(1)).info("Image store doesn't have enough capacity. 
Skip downloading template to this image store [{}].", + dataStoreMock); Assert.assertFalse(result); } diff --git a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java index 1cfc0cf9a85f..af02c2e1d319 100644 --- a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java +++ b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java @@ -710,7 +710,6 @@ private DiskOfferingVO prepareDiskOffering(long rootSize, long diskOfferingId, l Mockito.when(newRootDiskOffering.getId()).thenReturn(diskOfferingId); Mockito.when(newRootDiskOffering.getMinIops()).thenReturn(offeringMinIops); Mockito.when(newRootDiskOffering.getMaxIops()).thenReturn(offeringMaxIops); - Mockito.when(newRootDiskOffering.getName()).thenReturn("OfferingName"); return newRootDiskOffering; } diff --git a/server/src/test/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImplTest.java b/server/src/test/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImplTest.java index d80bbffaaf48..976f687e3eb1 100644 --- a/server/src/test/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImplTest.java +++ b/server/src/test/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImplTest.java @@ -634,7 +634,6 @@ private void baseTestImportVmFromVmwareToKvm(VcenterParameter vcenterParameter, when(convertHost.getStatus()).thenReturn(Status.Up); when(convertHost.getResourceState()).thenReturn(ResourceState.Enabled); when(convertHost.getId()).thenReturn(convertHostId); - when(convertHost.getName()).thenReturn("KVM-Convert-Host"); when(convertHost.getType()).thenReturn(Host.Type.Routing); when(convertHost.getDataCenterId()).thenReturn(zoneId); when(convertHost.getClusterId()).thenReturn(clusterId); From 7ba32df239132d5b7d11fa59ca1870c177026314 Mon Sep 17 00:00:00 2001 From: Vishesh Date: Tue, 24 Dec 2024 11:28:47 +0530 Subject: [PATCH 17/22] Update remaining files --- .../cluster/ClusterDrsAlgorithm.java | 7 +-- .../api/storage/HypervisorHostListener.java | 
10 ++++ .../com/cloud/storage/StorageManager.java | 2 +- .../com/cloud/vm/snapshot/VMSnapshotVO.java | 8 ++++ .../cluster/ClusterDrsPlanMigrationVO.java | 8 ++++ .../cloudstack/cluster/ClusterDrsPlanVO.java | 8 ++++ .../apache/cloudstack/network/BgpPeerVO.java | 5 +- .../storage/sharedfs/SharedFSVO.java | 8 ++++ .../cloudstack/vm/schedule/VMScheduleVO.java | 6 +++ .../vm/schedule/VMScheduledJobVO.java | 10 ++++ .../storage/object/store/ObjectStoreImpl.java | 8 ++++ .../BasePrimaryDataStoreLifeCycleImpl.java | 3 ++ .../cloud/cluster/ManagementServerHostVO.java | 2 +- .../cloudstack/framework/events/Event.java | 12 +++++ .../cloudstack/backup/NASBackupProvider.java | 11 ++--- .../apache/cloudstack/cluster/Balanced.java | 22 +++++---- .../cloudstack/cluster/BalancedTest.java | 18 ++++--- .../apache/cloudstack/cluster/Condensed.java | 22 +++++---- .../cloudstack/cluster/CondensedTest.java | 19 +++++--- .../mom/webhook/WebhookServiceImpl.java | 5 +- .../cluster/KubernetesServiceHelperImpl.java | 2 +- .../apache/cloudstack/service/NsxElement.java | 17 ++++--- .../service/NsxGuestNetworkGuru.java | 12 ++--- .../service/NsxPublicNetworkGuru.java | 6 +-- .../cloudstack/service/NsxServiceImpl.java | 6 +-- .../driver/CephObjectStoreDriverImpl.java | 2 +- .../driver/AdaptiveDataStoreDriverImpl.java | 47 ++++++++++--------- .../AdaptiveDataStoreLifeCycleImpl.java | 25 +++++----- .../provider/AdaptivePrimaryHostListener.java | 41 ++++++++++++---- ...tackPrimaryDataStoreLifeCycleImplTest.java | 1 - .../provider/LinstorHostListener.java | 4 +- .../snapshot/LinstorVMSnapshotStrategy.java | 18 +++---- .../ScaleIOPrimaryDataStoreLifeCycleTest.java | 2 - .../api/query/dao/SnapshotJoinDaoImpl.java | 2 +- .../java/com/cloud/bgp/BGPServiceImpl.java | 36 +++++++------- .../com/cloud/storage/StorageManagerImpl.java | 9 ++-- .../storage/listener/StoragePoolMonitor.java | 2 +- .../cluster/ClusterDrsServiceImpl.java | 32 +++++-------- .../network/RoutedIpv4ManagerImpl.java | 4 +- 
.../heuristics/HeuristicRuleHelper.java | 8 +++- .../storage/object/BucketApiServiceImpl.java | 2 +- .../storage/sharedfs/SharedFSServiceImpl.java | 6 +-- .../template/VnfTemplateManagerImpl.java | 16 +++---- .../VolumeImportUnmanageManagerImpl.java | 34 +++++++------- .../user/UserPasswordResetManagerImpl.java | 35 +++++--------- .../vm/schedule/VMScheduleManagerImpl.java | 6 +-- .../vm/schedule/VMSchedulerImpl.java | 30 +++++++----- .../cluster/ClusterDrsServiceImplTest.java | 8 ++-- .../heuristics/HeuristicRuleHelperTest.java | 3 -- .../VolumeImportUnmanageManagerImplTest.java | 18 +++---- .../vm/schedule/VMSchedulerImplTest.java | 2 +- .../cloud/usage/parser/BucketUsageParser.java | 2 +- .../usage/parser/NetworksUsageParser.java | 4 +- .../cloud/usage/parser/VpcUsageParser.java | 6 +-- 54 files changed, 375 insertions(+), 267 deletions(-) diff --git a/api/src/main/java/org/apache/cloudstack/cluster/ClusterDrsAlgorithm.java b/api/src/main/java/org/apache/cloudstack/cluster/ClusterDrsAlgorithm.java index 15f7fcd81741..665f95842b0d 100644 --- a/api/src/main/java/org/apache/cloudstack/cluster/ClusterDrsAlgorithm.java +++ b/api/src/main/java/org/apache/cloudstack/cluster/ClusterDrsAlgorithm.java @@ -21,6 +21,7 @@ import com.cloud.host.Host; import com.cloud.offering.ServiceOffering; +import com.cloud.org.Cluster; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; import com.cloud.utils.component.Adapter; @@ -55,8 +56,8 @@ public interface ClusterDrsAlgorithm extends Adapter { * @throws ConfigurationException * if there is an error in the configuration */ - boolean needsDrs(long clusterId, List> cpuList, - List> memoryList) throws ConfigurationException; + boolean needsDrs(Cluster cluster, List> cpuList, + List> memoryList) throws ConfigurationException; /** @@ -79,7 +80,7 @@ boolean needsDrs(long clusterId, List> cpuList, * * @return a ternary containing improvement, cost, benefit */ - Ternary getMetrics(long clusterId, VirtualMachine vm, ServiceOffering 
serviceOffering, + Ternary getMetrics(Cluster cluster, VirtualMachine vm, ServiceOffering serviceOffering, Host destHost, Map> hostCpuMap, Map> hostMemoryMap, Boolean requiresStorageMotion) throws ConfigurationException; diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/HypervisorHostListener.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/HypervisorHostListener.java index 6ac4030e1a6f..6b9a48b5a53d 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/HypervisorHostListener.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/HypervisorHostListener.java @@ -19,12 +19,22 @@ package org.apache.cloudstack.engine.subsystem.api.storage; import com.cloud.exception.StorageConflictException; +import com.cloud.host.Host; +import com.cloud.storage.StoragePool; public interface HypervisorHostListener { boolean hostAdded(long hostId); + default boolean hostConnect(Host host, StoragePool pool) throws StorageConflictException { + return hostConnect(host.getId(), pool.getId()); + } + boolean hostConnect(long hostId, long poolId) throws StorageConflictException; + default boolean hostDisconnected(Host host, StoragePool pool) throws StorageConflictException { + return hostDisconnected(host.getId(), pool.getId()); + } + boolean hostDisconnected(long hostId, long poolId); boolean hostAboutToBeRemoved(long hostId); diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index 36780d4e260a..0b9f7bcb7db4 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -367,7 +367,7 @@ static Boolean getFullCloneConfiguration(Long storeId) { boolean connectHostToSharedPool(Host host, long poolId) throws StorageUnavailableException, 
StorageConflictException; - void disconnectHostFromSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException; + void disconnectHostFromSharedPool(Host host, StoragePool pool) throws StorageUnavailableException, StorageConflictException; void enableHost(long hostId) throws StorageUnavailableException, StorageConflictException; diff --git a/engine/schema/src/main/java/com/cloud/vm/snapshot/VMSnapshotVO.java b/engine/schema/src/main/java/com/cloud/vm/snapshot/VMSnapshotVO.java index c48396ad0219..5b6f97b82e70 100644 --- a/engine/schema/src/main/java/com/cloud/vm/snapshot/VMSnapshotVO.java +++ b/engine/schema/src/main/java/com/cloud/vm/snapshot/VMSnapshotVO.java @@ -36,6 +36,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VMSnapshotOptions; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "vm_snapshots") @@ -145,6 +146,13 @@ public VMSnapshotVO(Long accountId, Long domainId, Long vmId, String description this.serviceOfferingId = serviceOfferingId; } + @Override + public String toString() { + return String.format("VMSnapshot %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "vmId")); + } + @Override public String getDescription() { return description; diff --git a/engine/schema/src/main/java/org/apache/cloudstack/cluster/ClusterDrsPlanMigrationVO.java b/engine/schema/src/main/java/org/apache/cloudstack/cluster/ClusterDrsPlanMigrationVO.java index eab2e555d693..6afc2e7707a1 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/cluster/ClusterDrsPlanMigrationVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/cluster/ClusterDrsPlanMigrationVO.java @@ -20,6 +20,7 @@ package org.apache.cloudstack.cluster; import org.apache.cloudstack.jobs.JobInfo; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; 
import javax.persistence.Column; import javax.persistence.Entity; @@ -66,6 +67,13 @@ protected ClusterDrsPlanMigrationVO() { } + @Override + public String toString() { + return String.format("ClusterDrsPlanMigration %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "planId", "vmId", "jobId")); + } + public long getId() { return id; } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/cluster/ClusterDrsPlanVO.java b/engine/schema/src/main/java/org/apache/cloudstack/cluster/ClusterDrsPlanVO.java index 0ce25ae90fe3..68f7fe4b44e8 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/cluster/ClusterDrsPlanVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/cluster/ClusterDrsPlanVO.java @@ -20,6 +20,7 @@ package org.apache.cloudstack.cluster; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import javax.persistence.Entity; @@ -68,6 +69,13 @@ protected ClusterDrsPlanVO() { uuid = UUID.randomUUID().toString(); } + @Override + public String toString() { + return String.format("ClusterDrsPlan %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "clusterId")); + } + public long getId() { return id; } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/network/BgpPeerVO.java b/engine/schema/src/main/java/org/apache/cloudstack/network/BgpPeerVO.java index 0203b34fb1e2..c60a3ec38683 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/network/BgpPeerVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/network/BgpPeerVO.java @@ -28,6 +28,7 @@ import javax.persistence.Table; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "bgp_peers") @@ -83,7 +84,9 @@ public BgpPeerVO(long dcId, String ip4Address, String ip6Address, Long 
asNumber, @Override public String toString() { - return String.format("BgpPeerVO [%s|%s|%s]", asNumber, ip4Address, ip6Address); + return String.format("BgpPeer %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "asNumber", "ip4Address", "ip6Address")); } @Override diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSVO.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSVO.java index 3b869a5429fe..b6d51e5cfa17 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSVO.java @@ -23,6 +23,7 @@ import java.util.UUID; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import javax.persistence.Entity; @@ -120,6 +121,13 @@ public SharedFSVO(String name, String description, long domainId, long accountId this.uuid = UUID.randomUUID().toString(); } + @Override + public String toString() { + return String.format("SharedFS %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "name", "uuid")); + } + @Override public Class getEntityType() { return SharedFS.class; diff --git a/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduleVO.java b/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduleVO.java index 176f88c5f6ba..e0065db1e77a 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduleVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduleVO.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.vm.schedule; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import javax.persistence.Entity; @@ -95,6 +96,11 
@@ public VMScheduleVO(long vmId, String description, String schedule, String timeZ this.enabled = enabled; } + @Override + public String toString() { + return String.format("VMSchedule %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "action", "description")); + } + @Override public String getUuid() { return uuid; diff --git a/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduledJobVO.java b/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduledJobVO.java index 0c2dd94cce58..b13d77849433 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduledJobVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduledJobVO.java @@ -18,6 +18,8 @@ */ package org.apache.cloudstack.vm.schedule; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; + import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.EnumType; @@ -71,6 +73,14 @@ public VMScheduledJobVO(long vmId, long vmScheduleId, VMSchedule.Action action, this.scheduledTime = scheduledTime; } + + @Override + public String toString() { + return String.format("VMScheduledJob %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "action", "vmScheduleId", "vmId")); + } + @Override public String getUuid() { return uuid; diff --git a/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/store/ObjectStoreImpl.java b/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/store/ObjectStoreImpl.java index f1c27526f529..93d3bd7c2f10 100644 --- a/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/store/ObjectStoreImpl.java +++ b/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/store/ObjectStoreImpl.java @@ -57,6 +57,14 @@ public static ObjectStoreEntity getDataStore(ObjectStoreVO objectStoreVO, Object return instance; 
} + @Override + public String toString() { + return "ObjectStoreImpl{" + + "objectStoreVO=" + objectStoreVO + + ", provider=" + provider.getName() + + '}'; + } + @Override public DataStoreDriver getDriver() { return this.driver; diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImpl.java index 5871ecdee5aa..de3be809a058 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImpl.java @@ -22,6 +22,7 @@ import javax.inject.Inject; +import com.cloud.dc.dao.DataCenterDao; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; @@ -55,6 +56,8 @@ public class BasePrimaryDataStoreLifeCycleImpl { @Inject protected HostDao hostDao; @Inject + protected DataCenterDao zoneDao; + @Inject protected StoragePoolHostDao storagePoolHostDao; private List getPoolHostsList(ClusterScope clusterScope, HypervisorType hypervisorType) { diff --git a/framework/cluster/src/main/java/com/cloud/cluster/ManagementServerHostVO.java b/framework/cluster/src/main/java/com/cloud/cluster/ManagementServerHostVO.java index cbd501d27e49..c589f4896b17 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/ManagementServerHostVO.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/ManagementServerHostVO.java @@ -202,7 +202,7 @@ public void setAlertCount(int count) { public String toString() { return String.format("ManagementServer %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "uuid", "msid", "name", 
"type")); + this, "id", "uuid", "msid", "name")); } @Override diff --git a/framework/events/src/main/java/org/apache/cloudstack/framework/events/Event.java b/framework/events/src/main/java/org/apache/cloudstack/framework/events/Event.java index 7a14f385fa15..9dfb5c699b94 100644 --- a/framework/events/src/main/java/org/apache/cloudstack/framework/events/Event.java +++ b/framework/events/src/main/java/org/apache/cloudstack/framework/events/Event.java @@ -49,6 +49,18 @@ public Event(String eventSource, String eventCategory, String eventType, String setResourceUUID(resourceUUID); } + @Override + public String toString() { + return "Event{" + + "eventId=" + eventId + + ", eventUuid='" + eventUuid + '\'' + + ", eventType='" + eventType + '\'' + + ", resourceType='" + resourceType + '\'' + + ", resourceUUID='" + resourceUUID + '\'' + + ", description='" + description + '\'' + + '}'; + } + public Long getEventId() { return eventId; } diff --git a/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java b/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java index 4a6725abdca5..5d3d1a919330 100644 --- a/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java +++ b/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java @@ -107,7 +107,7 @@ protected Host getLastVMHypervisorHost(VirtualMachine vm) { // Try to find any Up host in the same cluster for (final Host hostInCluster : hostDao.findHypervisorHostInCluster(host.getClusterId())) { if (hostInCluster.getStatus() == Status.Up) { - LOG.debug("Found Host " + hostInCluster.getName()); + LOG.debug("Found Host {}", hostInCluster); return hostInCluster; } } @@ -115,7 +115,7 @@ protected Host getLastVMHypervisorHost(VirtualMachine vm) { // Try to find any Host in the zone for (final HostVO hostInZone : hostDao.listByDataCenterIdAndHypervisorType(host.getDataCenterId(), Hypervisor.HypervisorType.KVM)) { if 
(hostInZone.getStatus() == Status.Up) { - LOG.debug("Found Host " + hostInZone.getName()); + LOG.debug("Found Host {}", hostInZone); return hostInZone; } } @@ -213,7 +213,7 @@ public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup) { List backedVolumes = backup.getBackedUpVolumes(); List volumes = backedVolumes.stream().map(volume -> volumeDao.findByUuid(volume.getUuid())).collect(Collectors.toList()); - LOG.debug("Restoring vm {} from backup {} on the NAS Backup Provider", vm.getUuid(), backup.getUuid()); + LOG.debug("Restoring vm {} from backup {} on the NAS Backup Provider", vm, backup); BackupRepository backupRepository = getBackupRepository(vm, backup); final Host host = getLastVMHypervisorHost(vm); @@ -263,7 +263,7 @@ public Pair restoreBackedUpVolume(Backup backup, String volumeU Optional matchingVolume = getBackedUpVolumeInfo(backupSourceVm.getBackupVolumeList(), volumeUuid); Long backedUpVolumeSize = matchingVolume.isPresent() ? matchingVolume.get().getSize() : 0L; - LOG.debug("Restoring vm volume" + volumeUuid + "from backup " + backup.getUuid() + " on the NAS Backup Provider"); + LOG.debug("Restoring vm volume {} from backup {} on the NAS Backup Provider", volume, backup); BackupRepository backupRepository = getBackupRepository(backupSourceVm, backup); VolumeVO restoredVolume = new VolumeVO(Volume.Type.DATADISK, null, backup.getZoneId(), @@ -377,8 +377,7 @@ public Map getBackupMetrics(Long zoneId, List> cpuList, - List> memoryList) throws ConfigurationException { + public boolean needsDrs(Cluster cluster, List> cpuList, + List> memoryList) throws ConfigurationException { + long clusterId = cluster.getId(); double threshold = getThreshold(clusterId); Double imbalance = ClusterDrsAlgorithm.getClusterImbalance(clusterId, cpuList, memoryList, null); String drsMetric = ClusterDrsAlgorithm.getClusterDrsMetric(clusterId); String metricType = ClusterDrsAlgorithm.getDrsMetricType(clusterId); Boolean useRatio = 
ClusterDrsAlgorithm.getDrsMetricUseRatio(clusterId); if (imbalance > threshold) { - logger.debug(String.format("Cluster %d needs DRS. Imbalance: %s Threshold: %s Algorithm: %s DRS metric: %s Metric Type: %s Use ratio: %s", - clusterId, imbalance, threshold, getName(), drsMetric, metricType, useRatio)); + logger.debug("Cluster {} needs DRS. Imbalance: {} Threshold: {} Algorithm: {} DRS metric: {} Metric Type: {} Use ratio: {}", + cluster, imbalance, threshold, getName(), drsMetric, metricType, useRatio); return true; } else { - logger.debug(String.format("Cluster %d does not need DRS. Imbalance: %s Threshold: %s Algorithm: %s DRS metric: %s Metric Type: %s Use ratio: %s", - clusterId, imbalance, threshold, getName(), drsMetric, metricType, useRatio)); + logger.debug("Cluster {} does not need DRS. Imbalance: {} Threshold: {} Algorithm: {} DRS metric: {} Metric Type: {} Use ratio: {}", + cluster, imbalance, threshold, getName(), drsMetric, metricType, useRatio); return false; } } @@ -67,15 +69,15 @@ public String getName() { } @Override - public Ternary getMetrics(long clusterId, VirtualMachine vm, + public Ternary getMetrics(Cluster cluster, VirtualMachine vm, ServiceOffering serviceOffering, Host destHost, Map> hostCpuMap, Map> hostMemoryMap, Boolean requiresStorageMotion) throws ConfigurationException { - Double preImbalance = ClusterDrsAlgorithm.getClusterImbalance(clusterId, new ArrayList<>(hostCpuMap.values()), new ArrayList<>(hostMemoryMap.values()), null); + Double preImbalance = ClusterDrsAlgorithm.getClusterImbalance(cluster.getId(), new ArrayList<>(hostCpuMap.values()), new ArrayList<>(hostMemoryMap.values()), null); Double postImbalance = getImbalancePostMigration(serviceOffering, vm, destHost, hostCpuMap, hostMemoryMap); - logger.debug(String.format("Cluster %d pre-imbalance: %s post-imbalance: %s Algorithm: %s VM: %s srcHost: %d destHost: %s", - clusterId, preImbalance, postImbalance, getName(), vm.getUuid(), vm.getHostId(), destHost.getUuid())); + 
logger.debug("Cluster {} pre-imbalance: {} post-imbalance: {} Algorithm: {} VM: {} srcHost: {} destHost: {}", + cluster, preImbalance, postImbalance, getName(), vm, vm.getHostId(), destHost); // This needs more research to determine the cost and benefit of a migration // TODO: Cost should be a factor of the VM size and the host capacity diff --git a/plugins/drs/cluster/balanced/src/test/java/org/apache/cloudstack/cluster/BalancedTest.java b/plugins/drs/cluster/balanced/src/test/java/org/apache/cloudstack/cluster/BalancedTest.java index a1562b52e384..d51606719584 100644 --- a/plugins/drs/cluster/balanced/src/test/java/org/apache/cloudstack/cluster/BalancedTest.java +++ b/plugins/drs/cluster/balanced/src/test/java/org/apache/cloudstack/cluster/BalancedTest.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.cluster; +import com.cloud.dc.ClusterVO; import com.cloud.host.Host; import com.cloud.service.ServiceOfferingVO; import com.cloud.utils.Ternary; @@ -61,6 +62,8 @@ public class BalancedTest { ServiceOfferingVO serviceOffering; + ClusterVO cluster; + long clusterId = 1L; Map> hostVmMap; @@ -73,6 +76,7 @@ public class BalancedTest { public void setUp() throws NoSuchFieldException, IllegalAccessException { closeable = MockitoAnnotations.openMocks(this); + cluster = Mockito.mock(ClusterVO.class); vm1 = Mockito.mock(VirtualMachine.class); vm2 = Mockito.mock(VirtualMachine.class); @@ -84,10 +88,10 @@ public void setUp() throws NoSuchFieldException, IllegalAccessException { hostVmMap.put(2L, Arrays.asList(vm2, vm3)); serviceOffering = Mockito.mock(ServiceOfferingVO.class); - Mockito.when(vm3.getHostId()).thenReturn(2L); + Mockito.when(cluster.getId()).thenReturn(clusterId); + Mockito.when(vm3.getHostId()).thenReturn(2L); Mockito.when(destHost.getId()).thenReturn(1L); - Mockito.when(serviceOffering.getCpu()).thenReturn(1); Mockito.when(serviceOffering.getSpeed()).thenReturn(1000); Mockito.when(serviceOffering.getRamSize()).thenReturn(1024); @@ -133,7 +137,7 @@ public void 
tearDown() throws Exception { @Test public void needsDrsWithCpu() throws ConfigurationException, NoSuchFieldException, IllegalAccessException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "cpu"); - assertFalse(balanced.needsDrs(clusterId, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); + assertFalse(balanced.needsDrs(cluster, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); } /* @@ -143,14 +147,14 @@ public void needsDrsWithCpu() throws ConfigurationException, NoSuchFieldExceptio @Test public void needsDrsWithMemory() throws ConfigurationException, NoSuchFieldException, IllegalAccessException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "memory"); - assertTrue(balanced.needsDrs(clusterId, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); + assertTrue(balanced.needsDrs(cluster, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); } /* 3. 
cluster with "unknown" metric */ @Test public void needsDrsWithUnknown() throws NoSuchFieldException, IllegalAccessException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "unknown"); - assertThrows(ConfigurationException.class, () -> balanced.needsDrs(clusterId, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); + assertThrows(ConfigurationException.class, () -> balanced.needsDrs(cluster, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); } /** @@ -179,7 +183,7 @@ public void needsDrsWithUnknown() throws NoSuchFieldException, IllegalAccessExce @Test public void getMetricsWithCpu() throws NoSuchFieldException, IllegalAccessException, ConfigurationException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "cpu"); - Ternary result = balanced.getMetrics(clusterId, vm3, serviceOffering, destHost, + Ternary result = balanced.getMetrics(cluster, vm3, serviceOffering, destHost, hostCpuFreeMap, hostMemoryFreeMap, false); assertEquals(0.0, result.first(), 0.01); assertEquals(0.0, result.second(), 0.0); @@ -193,7 +197,7 @@ public void getMetricsWithCpu() throws NoSuchFieldException, IllegalAccessExcept @Test public void getMetricsWithMemory() throws NoSuchFieldException, IllegalAccessException, ConfigurationException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "memory"); - Ternary result = balanced.getMetrics(clusterId, vm3, serviceOffering, destHost, + Ternary result = balanced.getMetrics(cluster, vm3, serviceOffering, destHost, hostCpuFreeMap, hostMemoryFreeMap, false); assertEquals(0.4, result.first(), 0.01); assertEquals(0, result.second(), 0.0); diff --git a/plugins/drs/cluster/condensed/src/main/java/org/apache/cloudstack/cluster/Condensed.java b/plugins/drs/cluster/condensed/src/main/java/org/apache/cloudstack/cluster/Condensed.java index 3a8befa628b1..70c5acd951fe 100644 --- 
a/plugins/drs/cluster/condensed/src/main/java/org/apache/cloudstack/cluster/Condensed.java +++ b/plugins/drs/cluster/condensed/src/main/java/org/apache/cloudstack/cluster/Condensed.java @@ -21,6 +21,7 @@ import com.cloud.host.Host; import com.cloud.offering.ServiceOffering; +import com.cloud.org.Cluster; import com.cloud.utils.Ternary; import com.cloud.utils.component.AdapterBase; import com.cloud.vm.VirtualMachine; @@ -40,8 +41,9 @@ public class Condensed extends AdapterBase implements ClusterDrsAlgorithm { private static final Logger logger = LogManager.getLogger(Condensed.class); @Override - public boolean needsDrs(long clusterId, List> cpuList, - List> memoryList) throws ConfigurationException { + public boolean needsDrs(Cluster cluster, List> cpuList, + List> memoryList) throws ConfigurationException { + long clusterId = cluster.getId(); double threshold = getThreshold(clusterId); Float skipThreshold = ClusterDrsImbalanceSkipThreshold.valueIn(clusterId); Double imbalance = ClusterDrsAlgorithm.getClusterImbalance(clusterId, cpuList, memoryList, skipThreshold); @@ -50,12 +52,12 @@ public boolean needsDrs(long clusterId, List> cpuList, Boolean useRatio = ClusterDrsAlgorithm.getDrsMetricUseRatio(clusterId); if (imbalance < threshold) { - logger.debug(String.format("Cluster %d needs DRS. Imbalance: %s Threshold: %s Algorithm: %s DRS metric: %s Metric Type: %s Use ratio: %s SkipThreshold: %s", - clusterId, imbalance, threshold, getName(), drsMetric, metricType, useRatio, skipThreshold)); + logger.debug("Cluster {} needs DRS. Imbalance: {} Threshold: {} Algorithm: {} DRS metric: {} Metric Type: {} Use ratio: {} SkipThreshold: {}", + cluster, imbalance, threshold, getName(), drsMetric, metricType, useRatio, skipThreshold); return true; } else { - logger.debug(String.format("Cluster %d does not need DRS. 
Imbalance: %s Threshold: %s Algorithm: %s DRS metric: %s Metric Type: %s Use ratio: %s SkipThreshold: %s", - clusterId, imbalance, threshold, getName(), drsMetric, metricType, useRatio, skipThreshold)); + logger.debug("Cluster {} does not need DRS. Imbalance: {} Threshold: {} Algorithm: {} DRS metric: {} Metric Type: {} Use ratio: {} SkipThreshold: {}", + cluster, imbalance, threshold, getName(), drsMetric, metricType, useRatio, skipThreshold); return false; } } @@ -70,16 +72,16 @@ public String getName() { } @Override - public Ternary getMetrics(long clusterId, VirtualMachine vm, + public Ternary getMetrics(Cluster cluster, VirtualMachine vm, ServiceOffering serviceOffering, Host destHost, Map> hostCpuMap, Map> hostMemoryMap, Boolean requiresStorageMotion) throws ConfigurationException { - Double preImbalance = ClusterDrsAlgorithm.getClusterImbalance(clusterId, new ArrayList<>(hostCpuMap.values()), + Double preImbalance = ClusterDrsAlgorithm.getClusterImbalance(cluster.getId(), new ArrayList<>(hostCpuMap.values()), new ArrayList<>(hostMemoryMap.values()), null); Double postImbalance = getImbalancePostMigration(serviceOffering, vm, destHost, hostCpuMap, hostMemoryMap); - logger.debug(String.format("Cluster %d pre-imbalance: %s post-imbalance: %s Algorithm: %s VM: %s srcHost: %d destHost: %s", - clusterId, preImbalance, postImbalance, getName(), vm.getUuid(), vm.getHostId(), destHost.getUuid())); + logger.debug("Cluster {} pre-imbalance: {} post-imbalance: {} Algorithm: {} VM: {} srcHost: {} destHost: {}", + cluster, preImbalance, postImbalance, getName(), vm, vm.getHostId(), destHost); // This needs more research to determine the cost and benefit of a migration // TODO: Cost should be a factor of the VM size and the host capacity diff --git a/plugins/drs/cluster/condensed/src/test/java/org/apache/cloudstack/cluster/CondensedTest.java b/plugins/drs/cluster/condensed/src/test/java/org/apache/cloudstack/cluster/CondensedTest.java index d50727745347..3d3896704dab 
100644 --- a/plugins/drs/cluster/condensed/src/test/java/org/apache/cloudstack/cluster/CondensedTest.java +++ b/plugins/drs/cluster/condensed/src/test/java/org/apache/cloudstack/cluster/CondensedTest.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.cluster; +import com.cloud.dc.ClusterVO; import com.cloud.host.Host; import com.cloud.service.ServiceOfferingVO; import com.cloud.utils.Ternary; @@ -61,6 +62,8 @@ public class CondensedTest { ServiceOfferingVO serviceOffering; + ClusterVO cluster; + long clusterId = 1L; Map> hostVmMap; @@ -74,6 +77,8 @@ public class CondensedTest { public void setUp() throws NoSuchFieldException, IllegalAccessException { closeable = MockitoAnnotations.openMocks(this); + cluster = Mockito.mock(ClusterVO.class); + vm1 = Mockito.mock(VirtualMachine.class); vm2 = Mockito.mock(VirtualMachine.class); vm3 = Mockito.mock(VirtualMachine.class); // vm to migrate @@ -84,10 +89,10 @@ public void setUp() throws NoSuchFieldException, IllegalAccessException { hostVmMap.put(2L, Arrays.asList(vm2, vm3)); serviceOffering = Mockito.mock(ServiceOfferingVO.class); - Mockito.when(vm3.getHostId()).thenReturn(2L); + Mockito.when(cluster.getId()).thenReturn(clusterId); + Mockito.when(vm3.getHostId()).thenReturn(2L); Mockito.when(destHost.getId()).thenReturn(1L); - Mockito.when(serviceOffering.getCpu()).thenReturn(1); Mockito.when(serviceOffering.getSpeed()).thenReturn(1000); Mockito.when(serviceOffering.getRamSize()).thenReturn(512); @@ -134,7 +139,7 @@ public void tearDown() throws Exception { @Test public void needsDrsWithCpu() throws ConfigurationException, NoSuchFieldException, IllegalAccessException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "cpu"); - assertTrue(condensed.needsDrs(clusterId, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); + assertTrue(condensed.needsDrs(cluster, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); } /* @@ -144,14 +149,14 @@ 
public void needsDrsWithCpu() throws ConfigurationException, NoSuchFieldExceptio @Test public void needsDrsWithMemory() throws ConfigurationException, NoSuchFieldException, IllegalAccessException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "memory"); - assertFalse(condensed.needsDrs(clusterId, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); + assertFalse(condensed.needsDrs(cluster, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); } /* 3. cluster with "unknown" metric */ @Test public void needsDrsWithUnknown() throws NoSuchFieldException, IllegalAccessException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "unknown"); - assertThrows(ConfigurationException.class, () -> condensed.needsDrs(clusterId, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); + assertThrows(ConfigurationException.class, () -> condensed.needsDrs(cluster, new ArrayList<>(hostCpuFreeMap.values()), new ArrayList<>(hostMemoryFreeMap.values()))); } /** @@ -180,7 +185,7 @@ public void needsDrsWithUnknown() throws NoSuchFieldException, IllegalAccessExce @Test public void getMetricsWithCpu() throws NoSuchFieldException, IllegalAccessException, ConfigurationException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "cpu"); - Ternary result = condensed.getMetrics(clusterId, vm3, serviceOffering, destHost, + Ternary result = condensed.getMetrics(cluster, vm3, serviceOffering, destHost, hostCpuFreeMap, hostMemoryFreeMap, false); assertEquals(0.0, result.first(), 0.0); assertEquals(0, result.second(), 0.0); @@ -194,7 +199,7 @@ public void getMetricsWithCpu() throws NoSuchFieldException, IllegalAccessExcept @Test public void getMetricsWithMemory() throws NoSuchFieldException, IllegalAccessException, ConfigurationException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "memory"); - Ternary result = 
condensed.getMetrics(clusterId, vm3, serviceOffering, destHost, + Ternary result = condensed.getMetrics(cluster, vm3, serviceOffering, destHost, hostCpuFreeMap, hostMemoryFreeMap, false); assertEquals(-0.4, result.first(), 0.01); assertEquals(0, result.second(), 0.0); diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookServiceImpl.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookServiceImpl.java index 58b265a99c06..97d00c45e4d4 100644 --- a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookServiceImpl.java +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookServiceImpl.java @@ -103,8 +103,7 @@ protected List getDeliveryJobs(Event event) throws EventBusException { return jobs; } if (event.getResourceAccountId() == null) { - logger.warn("Skipping delivering event [ID: {}, description: {}] to any webhook as account ID is missing", - event.getEventId(), event.getDescription()); + logger.warn("Skipping delivering event {} to any webhook as account ID is missing", event); throw new EventBusException(String.format("Account missing for the event ID: %s", event.getEventUuid())); } List domainIds = new ArrayList<>(); @@ -327,7 +326,7 @@ protected void runCleanupForLongestRunningManagementServer() { } long deliveriesLimit = WebhookDeliveriesLimit.value(); logger.debug("Clearing old deliveries for webhooks with limit={} using management server {}", - deliveriesLimit, msHost.getMsid()); + deliveriesLimit, msHost); long processed = cleanupOldWebhookDeliveries(deliveriesLimit); logger.debug("Cleared old deliveries with limit={} for {} webhooks", deliveriesLimit, processed); } catch (Exception e) { diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesServiceHelperImpl.java 
b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesServiceHelperImpl.java index efaec61b052e..bf49c2abb8d3 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesServiceHelperImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesServiceHelperImpl.java @@ -92,8 +92,8 @@ public void checkVmCanBeDestroyed(UserVm userVm) { if (vmMapVO == null) { return; } - logger.error(String.format("VM ID: %s is a part of Kubernetes cluster ID: %d", userVm.getId(), vmMapVO.getClusterId())); KubernetesCluster kubernetesCluster = kubernetesClusterDao.findById(vmMapVO.getClusterId()); + logger.error("VM {} is a part of Kubernetes cluster {} with ID: {}", userVm, kubernetesCluster, vmMapVO.getClusterId()); String msg = "Instance is a part of a Kubernetes cluster"; if (kubernetesCluster != null) { if (KubernetesCluster.ClusterType.ExternalManaged.equals(kubernetesCluster.getClusterType())) { diff --git a/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxElement.java b/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxElement.java index 7673e5a60386..e1b37a8d6533 100644 --- a/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxElement.java +++ b/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxElement.java @@ -405,7 +405,7 @@ private Pair validateVpcConfigurationAndGetAccount(DataCenterV boolean forNsx = false; List physicalNetworks = physicalNetworkDao.listByZoneAndTrafficType(zone.getId(), Networks.TrafficType.Guest); if (CollectionUtils.isNullOrEmpty(physicalNetworks)) { - String err = String.format("Desired physical network is not present in the zone %s for traffic type %s. ", zone.getName(), Networks.TrafficType.Guest.name()); + String err = String.format("Desired physical network is not present in the zone %s for traffic type %s. 
", zone, Networks.TrafficType.Guest.name()); logger.error(err); throw new InvalidConfigurationException(err); } @@ -498,11 +498,10 @@ public boolean processTimeout(long agentId, long seq) { } protected boolean canHandle(Network network, Network.Service service) { - logger.debug("Checking if Nsx Element can handle service " + service.getName() + " on network " - + network.getDisplayText()); + logger.debug("Checking if Nsx Element can handle service {} on network {}", service.getName(), network); if (!networkModel.isProviderForNetwork(getProvider(), network.getId())) { - logger.debug("Nsx Element is not a provider for network " + network.getDisplayText()); + logger.debug("Nsx Element is not a provider for network {}", network); return false; } @@ -577,16 +576,16 @@ protected synchronized boolean applyPFRulesInternal(Network network, List ips = _ipAddressDao.listByAssociatedVpc(vpc.getId(), true); if (CollectionUtils.isEmpty(ips)) { - String err = String.format("Cannot find a source NAT IP for the VPC %s", vpc.getName()); + String err = String.format("Cannot find a source NAT IP for the VPC %s", vpc); logger.error(err); throw new CloudRuntimeException(err); } @@ -136,10 +136,10 @@ public NicProfile allocate(Network network, NicProfile nic, VirtualMachineProfil boolean sourceNatEnabled = !NetworkOffering.NetworkMode.ROUTED.equals(vpcVO.getNetworkMode()) && vpcOfferingServiceMapDao.areServicesSupportedByVpcOffering(vpc.getVpcOfferingId(), services); - logger.info(String.format("Creating Tier 1 Gateway for VPC %s", vpc.getName())); + logger.info("Creating Tier 1 Gateway for VPC {}", vpc); boolean result = nsxService.createVpcNetwork(dataCenterId, accountId, domainId, resourceId, vpc.getName(), sourceNatEnabled); if (!result) { - String msg = String.format("Error creating Tier 1 Gateway for VPC %s", vpc.getName()); + String msg = String.format("Error creating Tier 1 Gateway for VPC %s", vpc); logger.error(msg); throw new CloudRuntimeException(msg); } diff --git 
a/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxServiceImpl.java b/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxServiceImpl.java index 139d8a55e592..64a2514fc51c 100644 --- a/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxServiceImpl.java +++ b/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxServiceImpl.java @@ -73,13 +73,13 @@ public boolean updateVpcSourceNatIp(Vpc vpc, IpAddress address) { long zoneId = vpc.getZoneId(); long vpcId = vpc.getId(); - logger.debug(String.format("Updating the source NAT IP for NSX VPC %s to IP: %s", vpc.getName(), address.getAddress().addr())); + logger.debug("Updating the source NAT IP for NSX VPC {} to IP: {}", vpc, address.getAddress().addr()); String tier1GatewayName = NsxControllerUtils.getTier1GatewayName(domainId, accountId, zoneId, vpcId, true); String sourceNatRuleId = NsxControllerUtils.getNsxNatRuleId(domainId, accountId, zoneId, vpcId, true); CreateOrUpdateNsxTier1NatRuleCommand cmd = NsxHelper.createOrUpdateNsxNatRuleCommand(domainId, accountId, zoneId, tier1GatewayName, "SNAT", address.getAddress().addr(), sourceNatRuleId); NsxAnswer answer = nsxControllerUtils.sendNsxCommand(cmd, zoneId); if (!answer.getResult()) { - logger.error(String.format("Could not update the source NAT IP address for VPC %s: %s", vpc.getName(), answer.getDetails())); + logger.error("Could not update the source NAT IP address for VPC {}: {}", vpc, answer.getDetails()); return false; } return true; @@ -109,7 +109,7 @@ public boolean deleteNetwork(long zoneId, long accountId, long domainId, Network network.getVpcId(), vpcName, network.getId(), network.getName()); NsxAnswer result = nsxControllerUtils.sendNsxCommand(deleteNsxSegmentCommand, network.getDataCenterId()); if (!result.getResult()) { - String msg = String.format("Could not remove the NSX segment for network %s: %s", network.getName(), result.getDetails()); + String msg = 
String.format("Could not remove the NSX segment for network %s: %s", network, result.getDetails()); logger.error(msg); throw new CloudRuntimeException(msg); } diff --git a/plugins/storage/object/ceph/src/main/java/org/apache/cloudstack/storage/datastore/driver/CephObjectStoreDriverImpl.java b/plugins/storage/object/ceph/src/main/java/org/apache/cloudstack/storage/datastore/driver/CephObjectStoreDriverImpl.java index b2e1d23917b3..551d96eab9af 100644 --- a/plugins/storage/object/ceph/src/main/java/org/apache/cloudstack/storage/datastore/driver/CephObjectStoreDriverImpl.java +++ b/plugins/storage/object/ceph/src/main/java/org/apache/cloudstack/storage/datastore/driver/CephObjectStoreDriverImpl.java @@ -215,7 +215,7 @@ public boolean createUser(long accountId, long storeId) { RgwAdmin rgwAdmin = getRgwAdminClient(storeId); String username = account.getUuid(); - logger.debug("Attempting to create Ceph RGW user for account " + account.getAccountName() + " with UUID " + username); + logger.debug("Attempting to create Ceph RGW user for account {} with UUID {}", account, username); try { Optional user = rgwAdmin.getUserInfo(username); if (user.isPresent()) { diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java index 329de5a398a2..e573f453a6c3 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java @@ -210,8 +210,8 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback callback) { CreateCmdResult result = null; try { - logger.info("Volume creation starting for data store [" + dataStore.getName() + - "] and data 
object [" + dataObject.getUuid() + "] of type [" + dataObject.getType() + "]"); + logger.info("Volume creation starting for data store [{}] and data object [{}] of type [{}]", + dataStore, dataObject, dataObject.getType()); // quota size of the cloudbyte volume will be increased with the given // HypervisorSnapshotReserve @@ -243,7 +243,7 @@ public void createAsync(DataStore dataStore, DataObject dataObject, if (DataObjectType.TEMPLATE.equals(dataObject.getType())) { volume = api.getVolume(context, dataIn); if (volume != null) { - logger.info("Template volume already exists [" + dataObject.getUuid() + "]"); + logger.info("Template volume already exists [{}]", dataObject); } } @@ -261,7 +261,7 @@ public void createAsync(DataStore dataStore, DataObject dataObject, throw e; } } - logger.info("New volume created on remote storage for [" + dataObject.getUuid() + "]"); + logger.info("New volume created on remote storage for [{}]", dataObject); } // set these from the discovered or created volume before proceeding @@ -273,9 +273,9 @@ public void createAsync(DataStore dataStore, DataObject dataObject, result = new CreateCmdResult(dataObject.getUuid(), new Answer(null)); result.setSuccess(true); - logger.info("Volume creation complete for [" + dataObject.getUuid() + "]"); + logger.info("Volume creation complete for [{}]", dataObject); } catch (Throwable e) { - logger.error("Volume creation failed for dataObject [" + dataObject.getUuid() + "]: " + e.toString(), e); + logger.error("Volume creation failed for dataObject [{}]: {}", dataObject, e.toString(), e); result = new CreateCmdResult(null, new Answer(null)); result.setResult(e.toString()); result.setSuccess(false); @@ -318,7 +318,7 @@ public void copyAsync(DataObject srcdata, DataObject destdata, AsyncCompletionCallback callback) { CopyCommandResult result = null; try { - logger.info("Copying volume " + srcdata.getUuid() + " to " + destdata.getUuid() + "]"); + logger.info("Copying volume {} to {}", srcdata, destdata); if
(!canCopy(srcdata, destdata)) { throw new CloudRuntimeException( @@ -330,7 +330,7 @@ public void copyAsync(DataObject srcdata, DataObject destdata, Map details = _storagePoolDao.getDetails(storagePool.getId()); ProviderAdapter api = getAPI(storagePool, details); - logger.info("Copy volume " + srcdata.getUuid() + " to " + destdata.getUuid()); + logger.info("Copy volume {} to {}", srcdata, destdata); ProviderVolume outVolume; ProviderAdapterContext context = newManagedVolumeContext(destdata); @@ -347,14 +347,14 @@ public void copyAsync(DataObject srcdata, DataObject destdata, // if we copied from one volume to another, the target volume's disk offering or user input may be of a larger size // we won't, however, shrink a volume if its smaller. if (outVolume.getAllocatedSizeInBytes() < destdata.getSize()) { - logger.info("Resizing volume " + destdata.getUuid() + " to requested target volume size of " + destdata.getSize()); + logger.info("Resizing volume {} to requested target volume size of {}", destdata, destdata.getSize()); api.resize(context, destIn, destdata.getSize()); } // initial volume info does not have connection map yet. That is added when grantAccess is called later. 
String finalPath = generatePathInfo(outVolume, null); persistVolumeData(storagePool, details, destdata, outVolume, null); - logger.info("Copy completed from [" + srcdata.getUuid() + "] to [" + destdata.getUuid() + "]"); + logger.info("Copy completed from [{}] to [{}]", srcdata, destdata); VolumeObjectTO voto = new VolumeObjectTO(); voto.setPath(finalPath); @@ -381,9 +381,8 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, @Override public boolean canCopy(DataObject srcData, DataObject destData) { - logger.debug("canCopy: Checking srcData [" + srcData.getUuid() + ":" + srcData.getType() + ":" - + srcData.getDataStore().getId() + " AND destData [" - + destData.getUuid() + ":" + destData.getType() + ":" + destData.getDataStore().getId() + "]"); + logger.debug("canCopy: Checking srcData [{}:{}:{} AND destData [{}:{}:{}]", + srcData, srcData.getType(), srcData.getDataStore(), destData, destData.getType(), destData.getDataStore()); try { if (!isSameProvider(srcData)) { logger.debug("canCopy: No we can't -- the source provider is NOT the correct type for this driver!"); @@ -458,12 +457,14 @@ public void resize(DataObject data, AsyncCompletionCallback cal ProviderAdapterContext context = newManagedVolumeContext(data); ProviderAdapterDataObject dataIn = newManagedDataObject(data, poolVO); - if (logger.isDebugEnabled()) logger.debug("Calling provider API to resize volume " + data.getUuid() + " to " + resizeParameter.newSize); + if (logger.isDebugEnabled()) + logger.debug("Calling provider API to resize volume {} to {}", data, resizeParameter.newSize); api.resize(context, dataIn, resizeParameter.newSize); if (vol.isAttachedVM()) { if (VirtualMachine.State.Running.equals(vol.getAttachedVM().getState())) { - if (logger.isDebugEnabled()) logger.debug("Notify currently attached VM of volume resize for " + data.getUuid() + " to " + resizeParameter.newSize); + if (logger.isDebugEnabled()) + logger.debug("Notify currently attached VM of volume resize 
for {} to {}", data, resizeParameter.newSize); _volumeService.resizeVolumeOnHypervisor(vol.getId(), resizeParameter.newSize, vol.getAttachedVM().getHostId(), vol.getAttachedVM().getInstanceName()); } } @@ -484,7 +485,7 @@ public void resize(DataObject data, AsyncCompletionCallback cal } public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) { - logger.debug("Granting host " + host.getName() + " access to volume " + dataObject.getUuid()); + logger.debug("Granting host {} access to volume {}", host, dataObject); try { StoragePoolVO storagePool = _storagePoolDao.findById(dataObject.getDataStore().getId()); @@ -502,10 +503,10 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap); - logger.info("Granted host " + host.getName() + " access to volume " + dataObject.getUuid()); + logger.info("Granted host {} access to volume {}", host, dataObject); return true; } catch (Throwable e) { - String msg = "Error granting host " + host.getName() + " access to volume " + dataObject.getUuid() + ":" + e.getMessage(); + String msg = String.format("Error granting host %s access to volume %s: %s", host, dataObject, e.getMessage()); logger.error(msg); throw new CloudRuntimeException(msg, e); } @@ -517,7 +518,7 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) return; } - logger.debug("Revoking access for host " + host.getName() + " to volume " + dataObject.getUuid()); + logger.debug("Revoking access for host {} to volume {}", host, dataObject); try { StoragePoolVO storagePool = _storagePoolDao.findById(dataObject.getDataStore().getId()); @@ -535,9 +536,9 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) Map connIdMap = api.getConnectionIdMap(dataIn); persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap); - logger.info("Revoked access for host " + host.getName() + " 
to volume " + dataObject.getUuid()); + logger.info("Revoked access for host {} to volume {}", host, dataObject); } catch (Throwable e) { - String msg = "Error revoking access for host " + host.getName() + " to volume " + dataObject.getUuid() + ":" + e.getMessage(); + String msg = String.format("Error revoking access for host %s to volume %s: %s", host, dataObject, e.getMessage()); logger.error(msg); throw new CloudRuntimeException(msg, e); } @@ -546,8 +547,8 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) @Override public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, QualityOfServiceState qualityOfServiceState) { - logger.info("handleQualityOfServiceVolumeMigration: " + volumeInfo.getUuid() + " " + - volumeInfo.getPath() + ": " + qualityOfServiceState.toString()); + logger.info("handleQualityOfServiceVolumeMigration: {} path: {}: {}", + volumeInfo, volumeInfo.getPath(), qualityOfServiceState.toString()); } @Override diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java index c08d8b8b4f9b..13d889dfc81a 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java @@ -262,7 +262,8 @@ private Hypervisor.HypervisorType getHypervisorTypeForCluster(long clusterId) { */ @Override public boolean attachCluster(DataStore store, ClusterScope scope) { - logger.info("Attaching storage pool [" + store.getName() + "] to cluster [" + scope.getScopeId() + "]"); + ClusterVO cluster = _clusterDao.findById(scope.getScopeId()); + logger.info("Attaching storage pool {} to cluster {}", store, 
cluster); _dataStoreHelper.attachCluster(store); StoragePoolVO dataStoreVO = _storagePoolDao.findById(store.getId()); @@ -272,13 +273,13 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { List allHosts = _resourceMgr.listAllUpHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), primarystore.getDataCenterId()); if (allHosts.isEmpty()) { _primaryDataStoreDao.expunge(primarystore.getId()); - throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primarystore.getClusterId()); + throw new CloudRuntimeException(String.format("No host up to associate a storage pool with in cluster %s", cluster)); } if (dataStoreVO.isManaged()) { //boolean success = false; for (HostVO h : allHosts) { - logger.debug("adding host " + h.getName() + " to storage pool " + store.getName()); + logger.debug("adding host {} to storage pool {}", h, store); } } @@ -294,7 +295,7 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { } if (poolHosts.isEmpty()) { - logger.warn("No host can access storage pool " + primarystore + " on cluster " + primarystore.getClusterId()); + logger.warn("No host can access storage pool {} on cluster {}", primarystore, cluster); _primaryDataStoreDao.expunge(primarystore.getId()); throw new CloudRuntimeException("Failed to access storage pool"); } @@ -304,14 +305,14 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { @Override public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) { - logger.info("Attaching storage pool [" + store.getName() + "] to host [" + scope.getScopeId() + "]"); + logger.info("Attaching storage pool {} to host {}", store::toString, () -> hostDao.findById(scope.getScopeId())); _dataStoreHelper.attachHost(store, scope, existingInfo); return true; } @Override public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) { - logger.info("Attaching storage pool [" + 
dataStore.getName() + "] to zone [" + scope.getScopeId() + "]"); + logger.info("Attaching storage pool {} to zone {}", dataStore, zoneDao.findById(scope.getScopeId())); List hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId()); List poolHosts = new ArrayList(); for (HostVO host : hosts) { @@ -336,7 +337,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType h */ @Override public boolean maintain(DataStore store) { - logger.info("Placing storage pool [" + store.getName() + "] in maintainence mode"); + logger.info("Placing storage pool {} in maintenance mode", store); if (_storagePoolAutomation.maintain(store)) { return _dataStoreHelper.maintain(store); } else { @@ -349,7 +350,7 @@ public boolean maintain(DataStore store) { */ @Override public boolean cancelMaintain(DataStore store) { - logger.info("Canceling storage pool maintainence for [" + store.getName() + "]"); + logger.info("Canceling storage pool maintenance for {}", store); if (_dataStoreHelper.cancelMaintain(store)) { return _storagePoolAutomation.cancelMaintain(store); } else { @@ -362,7 +363,7 @@ public boolean cancelMaintain(DataStore store) { */ @Override public boolean deleteDataStore(DataStore store) { - logger.info("Delete datastore called for [" + store.getName() + "]"); + logger.info("Delete datastore called for {}", store); return _dataStoreHelper.deletePrimaryDataStore(store); } @@ -371,7 +372,7 @@ public boolean deleteDataStore(DataStore store) { */ @Override public boolean migrateToObjectStore(DataStore store) { - logger.info("Migrate datastore called for [" + store.getName() + "]. This is not currently implemented for this provider at this time"); + logger.info("Migrate datastore called for {}. 
This is not currently implemented for this provider at this time", store); return false; } @@ -388,7 +389,7 @@ public void updateStoragePool(StoragePool storagePool, Map newDe */ @Override public void enableStoragePool(DataStore store) { - logger.info("Enabling storage pool [" + store.getName() + "]"); + logger.info("Enabling storage pool {}", store); _dataStoreHelper.enable(store); } @@ -397,7 +398,7 @@ public void enableStoragePool(DataStore store) { */ @Override public void disableStoragePool(DataStore store) { - logger.info("Disabling storage pool [" + store.getName() + "]"); + logger.info("Disabling storage pool {}", store); _dataStoreHelper.disable(store); } } diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java index a0c8ee722a03..346649d669b7 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java @@ -18,11 +18,16 @@ import javax.inject.Inject; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.storage.StoragePool; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; -import com.cloud.exception.StorageConflictException; import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.dao.StoragePoolHostDao; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -32,6 +37,12 @@ public class AdaptivePrimaryHostListener implements HypervisorHostListener { @Inject 
StoragePoolHostDao storagePoolHostDao; + @Inject + HostDao hostDao; + + @Inject + PrimaryDataStoreDao primaryDataStoreDao; + public AdaptivePrimaryHostListener(AdaptivePrimaryDatastoreAdapterFactoryMap factoryMap) { } @@ -49,11 +60,18 @@ public boolean hostAdded(long hostId) { } @Override - public boolean hostConnect(long hostId, long poolId) throws StorageConflictException { - logger.debug("hostConnect called for hostid [" + hostId + "], poolId [" + poolId + "]"); - StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId); + public boolean hostConnect(long hostId, long poolId) { + HostVO host = hostDao.findById(hostId); + StoragePoolVO pool = primaryDataStoreDao.findById(poolId); + return hostConnect(host, pool); + } + + @Override + public boolean hostConnect(Host host, StoragePool pool) { + logger.debug("hostConnect called for host {}, pool {}", host, pool); + StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(pool.getId(), host.getId()); if (storagePoolHost == null) { - storagePoolHost = new StoragePoolHostVO(poolId, hostId, ""); + storagePoolHost = new StoragePoolHostVO(pool.getId(), host.getId(), ""); storagePoolHostDao.persist(storagePoolHost); } else { return false; @@ -63,11 +81,18 @@ public boolean hostConnect(long hostId, long poolId) throws StorageConflictExcep @Override public boolean hostDisconnected(long hostId, long poolId) { - logger.debug("hostDisconnected called for hostid [" + hostId + "], poolId [" + poolId + "]"); - StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId); + HostVO host = hostDao.findById(hostId); + StoragePoolVO pool = primaryDataStoreDao.findById(poolId); + return hostDisconnected(host, pool); + } + + @Override + public boolean hostDisconnected(Host host, StoragePool pool){ + logger.debug("hostDisconnected called for host {}, pool {}", host, pool); + StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(pool.getId(), host.getId()); if 
(storagePoolHost != null) { - storagePoolHostDao.deleteStoragePoolHostDetails(hostId, poolId); + storagePoolHostDao.deleteStoragePoolHostDetails(host.getId(), pool.getId()); } return true; } diff --git a/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java b/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java index 9c096e8eb5ac..4bab2f83712c 100644 --- a/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java +++ b/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java @@ -149,7 +149,6 @@ public void initMocks() throws StorageConflictException { when(_dataStoreProviderMgr.getDataStoreProvider(anyString())).thenReturn(dataStoreProvider); when(dataStoreProvider.getName()).thenReturn("default"); - when(hostListener.hostConnect(Mockito.anyLong(), Mockito.anyLong())).thenReturn(true); storageMgr.registerHostListener("default", hostListener); diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/provider/LinstorHostListener.java b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/provider/LinstorHostListener.java index 534431ed681b..da458002f6d7 100644 --- a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/provider/LinstorHostListener.java +++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/provider/LinstorHostListener.java @@ -18,6 +18,7 @@ import com.cloud.exception.StorageConflictException; import com.cloud.host.HostVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; public class LinstorHostListener extends DefaultHostListener { @Override @@ 
-27,6 +28,7 @@ public boolean hostConnect(long hostId, long poolId) throws StorageConflictExcep host.setParent(host.getName()); hostDao.update(host.getId(), host); } - return super.hostConnect(hostId, poolId); + StoragePoolVO pool = primaryStoreDao.findById(poolId); + return super.hostConnect(host, pool); } } diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/snapshot/LinstorVMSnapshotStrategy.java b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/snapshot/LinstorVMSnapshotStrategy.java index c7fe6d211903..4e4c882ae808 100644 --- a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/snapshot/LinstorVMSnapshotStrategy.java +++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/snapshot/LinstorVMSnapshotStrategy.java @@ -83,12 +83,12 @@ private void linstorCreateMultiSnapshot( Snapshot snap = new Snapshot(); snap.setName(vmSnapshotVO.getName()); snap.setResourceName(LinstorUtil.RSC_PREFIX + vol.getPath()); - log.debug(String.format("Add volume %s;%s to snapshot", vol.getName(), snap.getResourceName())); + log.debug("Add volume {};{} to snapshot", vol, snap.getResourceName()); cmsReq.addSnapshotsItem(snap); } - log.debug(String.format("Creating multi snapshot %s", vmSnapshotVO.getName())); + log.debug("Creating multi snapshot {}", vmSnapshotVO); ApiCallRcList answers = api.createMultiSnapshot(cmsReq); - log.debug(String.format("Created multi snapshot %s", vmSnapshotVO.getName())); + log.debug("Created multi snapshot {}", vmSnapshotVO); if (answers.hasError()) { throw new CloudRuntimeException( "Error creating vm snapshots: " + LinstorUtil.getBestErrorMessage(answers)); @@ -123,7 +123,7 @@ private long getNewChainSizeAndPublishCreate(VMSnapshot vmSnapshot, List) status -> { - LOGGER.debug(String.format("Persisting AS Number Range %s-%s for the zone %s", startASNumber, endASNumber, zone.getName())); + LOGGER.debug("Persisting AS Number Range {}-{} for the 
zone {}", startASNumber, endASNumber, zone); ASNumberRangeVO asNumberRangeVO = new ASNumberRangeVO(zoneId, startASNumber, endASNumber); asNumberRangeDao.persist(asNumberRangeVO); for (long asn = startASNumber; asn <= endASNumber; asn++) { - LOGGER.debug(String.format("Persisting AS Number %s for zone %s", asn, zone.getName())); + LOGGER.debug("Persisting AS Number {} for zone {}", asn, zone); ASNumberVO asNumber = new ASNumberVO(asn, asNumberRangeVO.getId(), zoneId); asNumberDao.persist(asNumber); } return asNumberRangeVO; }); } catch (Exception e) { - String err = String.format("Error creating AS Number range %s-%s for zone %s: %s", startASNumber, endASNumber, zone.getName(), e.getMessage()); + String err = String.format("Error creating AS Number range %s-%s for zone %s: %s", startASNumber, endASNumber, zone, e.getMessage()); LOGGER.error(err, e); throw new CloudRuntimeException(err); } @@ -207,8 +207,8 @@ public Pair, Integer> listASNumbers(ListASNumbersCmd cmd) { throw new InvalidParameterException(String.format("Failed to find network with ID: %s", networkId)); } if (network.getVpcId() != null) { - LOGGER.debug(String.format("The network %s is a VPC tier, searching for the AS number on the VPC with ID %s", - network.getName(), network.getVpcId())); + LOGGER.debug("The network {} is a VPC tier, searching for the AS number on the VPC {}", + network::toString, () -> vpcDao.findById(network.getVpcId())); networkSearchId = null; vpcSerchId = network.getVpcId(); } @@ -226,15 +226,17 @@ public boolean allocateASNumber(long zoneId, Long asNumber, Long networkId, Long asNumberDao.findOneByAllocationStateAndZone(zoneId, false); if (asNumberVO == null || asNumberVO.getDataCenterId() != zoneId) { if (asNumber != null) { - LOGGER.error(String.format("Cannot find AS number %s in zone with ID %s", asNumber, zoneId)); + LOGGER.error("Cannot find AS number {} in zone {} with id {}", asNumber, dataCenterDao.findById(zoneId), zoneId); return false; } throw new 
CloudRuntimeException(String.format("Cannot allocate AS number in zone with ID %s", zoneId)); } long accountId, domainId; String netName; + VpcVO vpc = null; + NetworkVO network = null; if (Objects.nonNull(vpcId)) { - VpcVO vpc = vpcDao.findById(vpcId); + vpc = vpcDao.findById(vpcId); if (vpc == null) { LOGGER.error(String.format("Cannot find VPC with ID %s", vpcId)); return false; @@ -243,7 +245,7 @@ public boolean allocateASNumber(long zoneId, Long asNumber, Long networkId, Long domainId = vpc.getDomainId(); netName = vpc.getName(); } else { - NetworkVO network = networkDao.findById(networkId); + network = networkDao.findById(networkId); if (network == null) { LOGGER.error(String.format("Cannot find network with ID %s", networkId)); return false; @@ -253,8 +255,9 @@ public boolean allocateASNumber(long zoneId, Long asNumber, Long networkId, Long netName = network.getName(); } - LOGGER.debug(String.format("Allocating the AS Number %s to %s %s on zone %s", asNumber, - (Objects.nonNull(vpcId) ? "VPC" : "network"), netName, zoneId)); + LOGGER.debug("Allocating the AS Number {} to {} on zone {}", asNumber::toString, + (Objects.nonNull(vpcId) ? 
"VPC " + vpc : "network " + network)::toString, + () -> dataCenterDao.findById(zoneId)); asNumberVO.setAllocated(true); asNumberVO.setAllocatedTime(new Date()); if (Objects.nonNull(vpcId)) { @@ -291,11 +294,12 @@ private Pair logAndReturnErrorMessage(String msg) { @ActionEvent(eventType = EventTypes.EVENT_AS_NUMBER_RELEASE, eventDescription = "Releasing AS Number") public Pair releaseASNumber(long zoneId, long asNumber, boolean isDestroyNetworkOperation) { ASNumberVO asNumberVO = asNumberDao.findByAsNumber(asNumber); + DataCenterVO zone = dataCenterDao.findById(zoneId); if (asNumberVO == null) { - return logAndReturnErrorMessage(String.format("Cannot find AS Number %s on zone %s", asNumber, zoneId)); + return logAndReturnErrorMessage(String.format("Cannot find AS Number %s on zone %s", asNumber, zone)); } if (!asNumberVO.isAllocated()) { - LOGGER.debug(String.format("The AS Number %s is not allocated to any network on zone %s, ignoring release", asNumber, zoneId)); + LOGGER.debug("The AS Number {} is not allocated to any network on zone {}, ignoring release", asNumber, zone); return new Pair<>(true, ""); } Long networkId = asNumberVO.getNetworkId(); @@ -306,7 +310,7 @@ public Pair releaseASNumber(long zoneId, long asNumber, boolean return checksResult; } } - LOGGER.debug(String.format("Releasing AS Number %s on zone %s from previous allocation", asNumber, zoneId)); + LOGGER.debug("Releasing AS Number {} on zone {} from previous allocation", asNumber, zone); asNumberVO.setAllocated(false); asNumberVO.setAllocatedTime(null); asNumberVO.setDomainId(null); @@ -361,6 +365,7 @@ public boolean deleteASRange(long id) { long startASNumber = asRange.getStartASNumber(); long endASNumber = asRange.getEndASNumber(); long zoneId = asRange.getDataCenterId(); + DataCenterVO zone = dataCenterDao.findById(zoneId); List allocatedAsNumbers = asNumberDao.listAllocatedByASRange(asRange.getId()); if (Objects.nonNull(allocatedAsNumbers) && !allocatedAsNumbers.isEmpty()) { throw new 
CloudRuntimeException(String.format("There are %s AS numbers in use from the range %s-%s, cannot remove the range", @@ -374,13 +379,12 @@ public void doInTransactionWithoutResult(TransactionStatus status) { LOGGER.debug(String.format("Removed %s AS numbers from the range %s-%s", removedASNumbers, startASNumber, endASNumber)); asNumberRangeDao.remove(id); - LOGGER.debug(String.format("Removing the AS Number Range %s-%s for the zone %s", startASNumber, - endASNumber, zoneId)); + LOGGER.debug("Removing the AS Number Range {}-{} for the zone {}", startASNumber, endASNumber, zone); } }); } catch (Exception e) { String err = String.format("Error removing AS Number range %s-%s for zone %s: %s", - startASNumber, endASNumber, zoneId, e.getMessage()); + startASNumber, endASNumber, zone, e.getMessage()); LOGGER.error(err, e); throw new CloudRuntimeException(err); } diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 78aa82b43fef..7b14cae151e8 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -1548,18 +1548,17 @@ public boolean connectHostToSharedPool(Host host, long poolId) throws StorageUna DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); HypervisorHostListener listener = hostListeners.get(provider.getName()); - return listener.hostConnect(host.getId(), pool.getId()); + return listener.hostConnect(host, pool); } @Override - public void disconnectHostFromSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException { - StoragePool pool = (StoragePool)_dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); + public void disconnectHostFromSharedPool(Host host, StoragePool pool) throws StorageUnavailableException, StorageConflictException { assert (pool.isShared()) : "Now, did you actually read 
the name of this method?"; - logger.debug("Removing pool {} from host {}", pool::toString, () -> _hostDao.findById(hostId)); + logger.debug("Removing pool {} from host {}", pool, host); DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); HypervisorHostListener listener = hostListeners.get(provider.getName()); - listener.hostDisconnected(hostId, pool.getId()); + listener.hostDisconnected(host, pool); } @Override diff --git a/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java b/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java index 4d8894936cfd..a0e10c646b58 100644 --- a/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java +++ b/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java @@ -185,7 +185,7 @@ public synchronized boolean processDisconnect(long agentId, String uuid, String } try { - _storageManager.disconnectHostFromSharedPool(host.getId(), pool.getId()); + _storageManager.disconnectHostFromSharedPool(host, pool); } catch (Exception e) { logger.error("Unable to disconnect host {} from storage pool {} due to {}", host, pool, e.toString()); disconnectResult = false; diff --git a/server/src/main/java/org/apache/cloudstack/cluster/ClusterDrsServiceImpl.java b/server/src/main/java/org/apache/cloudstack/cluster/ClusterDrsServiceImpl.java index 3f312e0ba3ee..a662d47d4541 100644 --- a/server/src/main/java/org/apache/cloudstack/cluster/ClusterDrsServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/cluster/ClusterDrsServiceImpl.java @@ -210,7 +210,7 @@ void updateOldPlanMigrations() { try { updateDrsPlanMigrations(plan); } catch (Exception e) { - logger.error(String.format("Unable to update DRS plan details [id=%d]", plan.getId()), e); + logger.error("Unable to update DRS plan details {}", plan, e); } } } @@ -228,7 +228,7 @@ void updateDrsPlanMigrations(ClusterDrsPlanVO plan) { drsPlanDao.update(plan.getId(), plan); 
ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, EventVO.LEVEL_INFO, EventTypes.EVENT_CLUSTER_DRS, true, - String.format("DRS execution task completed for cluster [id=%s]", plan.getClusterId()), + String.format("DRS execution task completed for cluster %s", clusterDao.findById(plan.getClusterId())), plan.getClusterId(), ApiCommandResourceType.Cluster.toString(), plan.getEventId()); return; } @@ -237,8 +237,7 @@ void updateDrsPlanMigrations(ClusterDrsPlanVO plan) { try { AsyncJobVO job = asyncJobManager.getAsyncJob(migration.getJobId()); if (job == null) { - logger.warn(String.format("Unable to find async job [id=%d] for DRS plan migration [id=%d]", - migration.getJobId(), migration.getId())); + logger.warn("Unable to find async job [id={}] for DRS plan migration {}", migration.getJobId(), migration); migration.setStatus(JobInfo.Status.FAILED); drsPlanMigrationDao.update(migration.getId(), migration); continue; @@ -248,7 +247,7 @@ void updateDrsPlanMigrations(ClusterDrsPlanVO plan) { drsPlanMigrationDao.update(migration.getId(), migration); } } catch (Exception e) { - logger.error(String.format("Unable to update DRS plan migration [id=%d]", migration.getId()), e); + logger.error("Unable to update DRS plan migration {}", migration, e); } } } @@ -291,13 +290,9 @@ void generateDrsPlanForAllClusters() { ClusterDrsMaxMigrations.valueIn(cluster.getId())); savePlan(cluster.getId(), plan, eventId, ClusterDrsPlan.Type.AUTOMATED, ClusterDrsPlan.Status.READY); - logger.info(String.format("Generated DRS plan for cluster %s [id=%s]", cluster.getName(), - cluster.getUuid())); + logger.info("Generated DRS plan for cluster {}", cluster); } catch (Exception e) { - logger.error( - String.format("Unable to generate DRS plans for cluster %s [id=%s]", cluster.getName(), - cluster.getUuid()), - e); + logger.error("Unable to generate DRS plans for cluster {}", cluster, e); } finally { clusterLock.unlock(); } @@ -362,7 +357,7 @@ List> getDrsPlan(Cluster 
cluster, serviceOfferingDao.findByIdIncludingRemoved(vm.getId(), vm.getServiceOfferingId())); } - while (iteration < maxIterations && algorithm.needsDrs(cluster.getId(), new ArrayList<>(hostCpuMap.values()), + while (iteration < maxIterations && algorithm.needsDrs(cluster, new ArrayList<>(hostCpuMap.values()), new ArrayList<>(hostMemoryMap.values()))) { Pair bestMigration = getBestMigration(cluster, algorithm, vmList, vmIdServiceOfferingMap, hostCpuMap, hostMemoryMap); @@ -372,8 +367,7 @@ List> getDrsPlan(Cluster cluster, logger.debug("VM migrating to it's original host or no host found for migration"); break; } - logger.debug(String.format("Plan for VM %s to migrate from host %s to host %s", vm.getUuid(), - hostMap.get(vm.getHostId()).getUuid(), destHost.getUuid())); + logger.debug("Plan for VM {} to migrate from host {} to host {}", vm, hostMap.get(vm.getHostId()), destHost); ServiceOffering serviceOffering = vmIdServiceOfferingMap.get(vm.getId()); migrationPlan.add(new Ternary<>(vm, hostMap.get(vm.getHostId()), hostMap.get(destHost.getId()))); @@ -467,7 +461,7 @@ Pair getBestMigration(Cluster cluster, ClusterDrsAlgorithm if (!suitableDestinationHosts.contains(destHost) || cluster.getId() != destHost.getClusterId()) { continue; } - Ternary metrics = algorithm.getMetrics(cluster.getId(), vm, + Ternary metrics = algorithm.getMetrics(cluster, vm, vmIdServiceOfferingMap.get(vm.getId()), destHost, hostCpuCapacityMap, hostMemoryCapacityMap, requiresStorageMotion.get(destHost)); @@ -528,7 +522,7 @@ void processPlans() { try { executeDrsPlan(plan); } catch (Exception e) { - logger.error(String.format("Unable to execute DRS plan [id=%d]", plan.getId()), e); + logger.error("Unable to execute DRS plan {}", plan, e); } } } @@ -564,16 +558,14 @@ void executeDrsPlan(ClusterDrsPlanVO plan) { migration.getDestHostId())); } - logger.debug( - String.format("Executing DRS plan %s for vm %s to host %s", plan.getId(), vm.getInstanceName(), - host.getName())); + 
logger.debug("Executing DRS plan {} for vm {} to host {}", plan, vm, host); long jobId = createMigrateVMAsyncJob(vm, host, plan.getEventId()); AsyncJobVO job = asyncJobManager.getAsyncJob(jobId); migration.setJobId(jobId); migration.setStatus(job.getStatus()); drsPlanMigrationDao.update(migration.getId(), migration); } catch (Exception e) { - logger.warn(String.format("Unable to execute DRS plan %s due to %s", plan.getUuid(), e.getMessage())); + logger.warn("Unable to execute DRS plan {} due to {}", plan, e.getMessage()); migration.setStatus(JobInfo.Status.FAILED); drsPlanMigrationDao.update(migration.getId(), migration); } diff --git a/server/src/main/java/org/apache/cloudstack/network/RoutedIpv4ManagerImpl.java b/server/src/main/java/org/apache/cloudstack/network/RoutedIpv4ManagerImpl.java index ab05895b8d2d..1db2ad432751 100644 --- a/server/src/main/java/org/apache/cloudstack/network/RoutedIpv4ManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/network/RoutedIpv4ManagerImpl.java @@ -962,10 +962,10 @@ public boolean applyRoutingFirewallRule(long id) { return false; } if (!FirewallRule.Purpose.Firewall.equals(rule.getPurpose())) { - logger.error(String.format("Cannot apply routing firewall rule with ID: %d as purpose %s is not %s", id, rule.getPurpose(), FirewallRule.Purpose.Firewall)); + logger.error("Cannot apply routing firewall rule: {} as purpose {} is not {}", rule, rule.getPurpose(), FirewallRule.Purpose.Firewall); return false; } - logger.debug(String.format("Applying routing firewall rules for rule with ID: %s", rule.getUuid())); + logger.debug("Applying routing firewall rules for rule with ID: {}", rule); List rules = new ArrayList<>(); rules.addAll(firewallDao.listByNetworkPurposeTrafficType(rule.getNetworkId(), rule.getPurpose(), FirewallRule.TrafficType.Egress)); rules.addAll(firewallDao.listByNetworkPurposeTrafficType(rule.getNetworkId(), rule.getPurpose(), FirewallRule.TrafficType.Ingress)); diff --git 
a/server/src/main/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelper.java b/server/src/main/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelper.java index 267d813364b8..21a34de0d23b 100644 --- a/server/src/main/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelper.java +++ b/server/src/main/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelper.java @@ -17,6 +17,7 @@ package org.apache.cloudstack.storage.heuristics; import com.cloud.api.ApiDBUtils; +import com.cloud.dc.dao.DataCenterDao; import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; import com.cloud.storage.StorageManager; @@ -74,6 +75,9 @@ public class HeuristicRuleHelper { @Inject private AccountDao accountDao; + @Inject + private DataCenterDao zoneDao; + /** * Returns the {@link DataStore} object if the zone, specified by the ID, has an active heuristic rule for the given {@link HeuristicType}. * It returns null otherwise. @@ -87,10 +91,10 @@ public DataStore getImageStoreIfThereIsHeuristicRule(Long zoneId, HeuristicType HeuristicVO heuristicsVO = secondaryStorageHeuristicDao.findByZoneIdAndType(zoneId, heuristicType); if (heuristicsVO == null) { - logger.debug(String.format("No heuristic rules found for zone with ID [%s] and heuristic type [%s]. Returning null.", zoneId, heuristicType)); + logger.debug("No heuristic rules found for zone [{}] and heuristic type [{}]. 
Returning null.", () -> zoneDao.findById(zoneId), heuristicType::toString); return null; } else { - logger.debug(String.format("Found the heuristic rule %s to apply for zone with ID [%s].", heuristicsVO, zoneId)); + logger.debug("Found the heuristic rule {} to apply for zone [{}].", heuristicsVO::toString, () -> zoneDao.findById(zoneId)); return interpretHeuristicRule(heuristicsVO.getHeuristicRule(), heuristicType, obj, zoneId); } } diff --git a/server/src/main/java/org/apache/cloudstack/storage/object/BucketApiServiceImpl.java b/server/src/main/java/org/apache/cloudstack/storage/object/BucketApiServiceImpl.java index 58b41d6a55df..389ca52b03bb 100644 --- a/server/src/main/java/org/apache/cloudstack/storage/object/BucketApiServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/storage/object/BucketApiServiceImpl.java @@ -117,7 +117,7 @@ public Bucket allocBucket(CreateBucketCmd cmd) { ObjectStoreEntity objectStore = (ObjectStoreEntity)_dataStoreMgr.getDataStore(objectStoreVO.getId(), DataStoreRole.Object); try { if(!objectStore.createUser(ownerId)) { - logger.error("Failed to create user in objectstore "+ objectStore.getName()); + logger.error("Failed to create user in objectstore {}", objectStore); return null; } } catch (CloudRuntimeException e) { diff --git a/server/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSServiceImpl.java b/server/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSServiceImpl.java index 072f7d4cd3eb..4f0aabd3f379 100644 --- a/server/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSServiceImpl.java @@ -179,8 +179,8 @@ public boolean stateTransitTo(SharedFS sharedFS, Event event) { try { return sharedFSStateMachine.transitTo(sharedFS, event, null, sharedFSDao); } catch (NoTransitionException e) { - String message = String.format("State transit error for Shared FileSystem %s [%s] due to exception: %s.", - 
sharedFS.getName(), sharedFS.getId(), e.getMessage()); + String message = String.format("State transit error for Shared FileSystem %s due to exception: %s.", + sharedFS, e.getMessage()); logger.error(message, e); throw new CloudRuntimeException(message, e); } @@ -706,7 +706,7 @@ public void cleanupSharedFS(boolean recurring) { deleteSharedFS(sharedFS.getId()); } catch (Exception e) { stateTransitTo(sharedFS, Event.OperationFailed); - logger.error(String.format("Unable to expunge Shared FileSystem [%s] due to: [%s].", sharedFS.getUuid(), e.getMessage())); + logger.error("Unable to expunge Shared FileSystem {} due to: [{}].", sharedFS, e.getMessage()); } } } finally { diff --git a/server/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImpl.java b/server/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImpl.java index 6a34ca2d0e55..d13492ed2168 100644 --- a/server/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImpl.java @@ -268,18 +268,18 @@ protected Map getManagementNetworkAndIp(VirtualMachineTemplate continue; } if (!networkModel.areServicesSupportedInNetwork(network.getId(), Network.Service.StaticNat)) { - logger.info(String.format("Network ID: %s does not support static nat, " + - "skipping this network configuration for VNF appliance", network.getUuid())); + logger.info("Network ID: {} does not support static nat, " + + "skipping this network configuration for VNF appliance", network); continue; } if (network.getVpcId() != null) { - logger.info(String.format("Network ID: %s is a VPC tier, " + - "skipping this network configuration for VNF appliance", network.getUuid())); + logger.info("Network ID: {} is a VPC tier, " + + "skipping this network configuration for VNF appliance", network); continue; } if (!networkModel.areServicesSupportedInNetwork(network.getId(), Network.Service.Firewall)) { - 
logger.info(String.format("Network ID: %s does not support firewall, " + - "skipping this network configuration for VNF appliance", network.getUuid())); + logger.info("Network ID: {} does not support firewall, " + + "skipping this network configuration for VNF appliance", network); continue; } networkAndIpMap.put(network, nic.getIPv4Address()); @@ -326,7 +326,7 @@ public void createIsolatedNetworkRulesForVnfAppliance(DataCenter zone, VirtualMa Set ports = getOpenPortsForVnfAppliance(template); for (Map.Entry entry : networkAndIpMap.entrySet()) { Network network = entry.getKey(); - logger.debug("Creating network rules for VNF appliance on isolated network " + network.getUuid()); + logger.debug("Creating network rules for VNF appliance on isolated network {}", network); String ip = entry.getValue(); IpAddress publicIp = networkService.allocateIP(owner, zone.getId(), network.getId(), null, null); if (publicIp == null) { @@ -367,7 +367,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) throws }); firewallService.applyIngressFwRules(publicIp.getId(), owner); } - logger.debug("Created network rules for VNF appliance on isolated network " + network.getUuid()); + logger.debug("Created network rules for VNF appliance on isolated network {}", network); } } } diff --git a/server/src/main/java/org/apache/cloudstack/storage/volume/VolumeImportUnmanageManagerImpl.java b/server/src/main/java/org/apache/cloudstack/storage/volume/VolumeImportUnmanageManagerImpl.java index a81673151432..aac5d1277a60 100644 --- a/server/src/main/java/org/apache/cloudstack/storage/volume/VolumeImportUnmanageManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/storage/volume/VolumeImportUnmanageManagerImpl.java @@ -207,7 +207,7 @@ public VolumeResponse importVolume(ImportVolumeCmd cmd) { volumeApiService.validateCustomDiskOfferingSizeRange(volume.getVirtualSize() / ByteScaleUtils.GiB); } if (!volumeApiService.doesTargetStorageSupportDiskOffering(pool, 
diskOffering.getTags())) { - logFailureAndThrowException(String.format("Disk offering: %s storage tags are not compatible with selected storage pool: %s", diskOffering.getUuid(), pool.getUuid())); + logFailureAndThrowException(String.format("Disk offering: %s storage tags are not compatible with selected storage pool: %s", diskOffering, pool)); } // 7. create records @@ -249,7 +249,7 @@ protected List listVolumesForImportInternal(StoragePoolVO poo GetVolumesOnStorageCommand command = new GetVolumesOnStorageCommand(storageTO, volumePath, keyword); Answer answer = agentManager.easySend(host.getId(), command); if (answer == null || !(answer instanceof GetVolumesOnStorageAnswer)) { - logFailureAndThrowException("Cannot get volumes on storage pool via host " + host.getName()); + logFailureAndThrowException(String.format("Cannot get volumes on storage pool via host %s", host)); } if (!answer.getResult()) { logFailureAndThrowException("Volume cannot be imported due to " + answer.getDetails()); @@ -286,10 +286,10 @@ protected StoragePoolVO checkIfPoolAvailable(Long poolId) { logFailureAndThrowException(String.format("Storage pool (ID: %s) does not exist", poolId)); } if (pool.isInMaintenance()) { - logFailureAndThrowException(String.format("Storage pool (name: %s) is in maintenance", pool.getName())); + logFailureAndThrowException(String.format("Storage pool %s is in maintenance", pool)); } if (!StoragePoolStatus.Up.equals(pool.getStatus())) { - logFailureAndThrowException(String.format("Storage pool (ID: %s) is not Up: %s", pool.getName(), pool.getStatus())); + logFailureAndThrowException(String.format("Storage pool %s is not Up: %s", pool, pool.getStatus())); } return pool; } @@ -298,7 +298,7 @@ protected Pair findHostAndLocalPathForVolumeImport(StoragePoolVO List hosts = new ArrayList<>(); switch (pool.getScope()) { case HOST: - return findHostAndLocalPathForVolumeImportForHostScope(pool.getId()); + return findHostAndLocalPathForVolumeImportForHostScope(pool); case 
CLUSTER: hosts = hostDao.findHypervisorHostInCluster((pool.getClusterId())); break; @@ -316,8 +316,8 @@ protected Pair findHostAndLocalPathForVolumeImport(StoragePoolVO return null; } - private Pair findHostAndLocalPathForVolumeImportForHostScope(Long poolId) { - List storagePoolHostVOs = storagePoolHostDao.listByPoolId(poolId); + private Pair findHostAndLocalPathForVolumeImportForHostScope(StoragePoolVO pool) { + List storagePoolHostVOs = storagePoolHostDao.listByPoolId(pool.getId()); if (CollectionUtils.isNotEmpty(storagePoolHostVOs)) { for (StoragePoolHostVO storagePoolHostVO : storagePoolHostVOs) { HostVO host = hostDao.findById(storagePoolHostVO.getHostId()); @@ -326,7 +326,7 @@ private Pair findHostAndLocalPathForVolumeImportForHostScope(Lon } } } - logFailureAndThrowException("No host found to perform volume import on pool: " + poolId); + logFailureAndThrowException(String.format("No host found to perform volume import on pool: %s", pool)); return null; } @@ -408,20 +408,20 @@ protected DiskOfferingVO getOrCreateDiskOffering(Account owner, Long diskOfferin logFailureAndThrowException(String.format("Disk offering %s does not exist", diskOfferingId)); } if (!DiskOffering.State.Active.equals(diskOfferingVO.getState())) { - logFailureAndThrowException(String.format("Disk offering with ID %s is not active", diskOfferingId)); + logFailureAndThrowException(String.format("Disk offering %s is not active", diskOfferingVO)); } if (diskOfferingVO.isUseLocalStorage() != isLocal) { - logFailureAndThrowException(String.format("Disk offering with ID %s should use %s storage", diskOfferingId, isLocal ? "local": "shared")); + logFailureAndThrowException(String.format("Disk offering %s should use %s storage", diskOfferingVO, isLocal ? 
"local": "shared")); } if (diskOfferingVO.getEncrypt()) { - logFailureAndThrowException(String.format("Disk offering with ID %s should not support volume encryption", diskOfferingId)); + logFailureAndThrowException(String.format("Disk offering %s should not support volume encryption", diskOfferingVO)); } // check if disk offering is accessible by the account/owner try { configMgr.checkDiskOfferingAccess(owner, diskOfferingVO, dcDao.findById(zoneId)); return diskOfferingVO; } catch (PermissionDeniedException ex) { - logFailureAndThrowException(String.format("Disk offering with ID %s is not accessible by owner %s", diskOfferingId, owner)); + logFailureAndThrowException(String.format("Disk offering %s is not accessible by owner %s", diskOfferingVO, owner)); } } return getOrCreateDefaultDiskOfferingIdForVolumeImport(isLocal); @@ -462,7 +462,7 @@ protected void checkResourceLimitForImportVolume(Account owner, VolumeOnStorageT resourceLimitService.checkResourceLimit(owner, Resource.ResourceType.volume); resourceLimitService.checkResourceLimit(owner, Resource.ResourceType.primary_storage, volumeSize); } catch (ResourceAllocationException e) { - logger.error(String.format("VM resource allocation error for account: %s", owner.getUuid()), e); + logger.error("VM resource allocation error for account: {}", owner, e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM resource allocation error for account: %s. 
%s", owner.getUuid(), StringUtils.defaultString(e.getMessage()))); } } @@ -482,7 +482,7 @@ private void publicUsageEventForVolumeImportAndUnmanage(VolumeVO volumeVO, boole volumeVO.getId(), volumeVO.getName(), volumeVO.getDiskOfferingId(), null, volumeVO.getSize(), Volume.class.getName(), volumeVO.getUuid(), volumeVO.isDisplayVolume()); } catch (Exception e) { - logger.error(String.format("Failed to publish volume ID: %s event or usage records during volume import/unmanage", volumeVO.getUuid()), e); + logger.error("Failed to publish volume: {} event or usage records during volume import/unmanage", volumeVO, e); } } @@ -497,13 +497,13 @@ private VolumeVO checkIfVolumeCanBeUnmanaged(long volumeId) { logFailureAndThrowException(String.format("Volume (ID: %s) does not exist", volumeId)); } if (!Volume.State.Ready.equals(volumeVO.getState())) { - logFailureAndThrowException(String.format("Volume (ID: %s) is not ready", volumeId)); + logFailureAndThrowException(String.format("Volume %s is not ready", volumeVO)); } if (volumeVO.getEncryptFormat() != null) { - logFailureAndThrowException(String.format("Volume (ID: %s) is encrypted", volumeId)); + logFailureAndThrowException(String.format("Volume %s is encrypted", volumeVO)); } if (volumeVO.getAttached() != null || volumeVO.getInstanceId() != null) { - logFailureAndThrowException(String.format("Volume (ID: %s) is attached to VM (ID: %s)", volumeId, volumeVO.getInstanceId())); + logFailureAndThrowException(String.format("Volume %s is attached to VM (ID: %s)", volumeVO, volumeVO.getInstanceId())); } return volumeVO; } diff --git a/server/src/main/java/org/apache/cloudstack/user/UserPasswordResetManagerImpl.java b/server/src/main/java/org/apache/cloudstack/user/UserPasswordResetManagerImpl.java index f35f69fb8bf2..6574489c827d 100644 --- a/server/src/main/java/org/apache/cloudstack/user/UserPasswordResetManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/user/UserPasswordResetManagerImpl.java @@ -192,11 +192,8 @@ 
public void setResetTokenAndSend(UserAccount userAccount) { mailProperties.setRecipients(addresses); mailSender.sendMail(mailProperties); - logger.debug(String.format( - "User password reset email for user id: %d username: %s account id: %d" + - " domain id:%d sent to %s with token expiry at %s", - userAccount.getId(), username, userAccount.getAccountId(), - userAccount.getDomainId(), email, resetTokenExpiryTime)); + logger.debug("User password reset email for user {} account id: {} domain id: {} sent to {} with token expiry at {}", + userAccount, userAccount.getAccountId(), userAccount.getDomainId(), email, resetTokenExpiryTime); } @Override @@ -205,10 +202,8 @@ public boolean validateAndResetPassword(UserAccount user, String token, String p UserDetailVO resetTokenExpiryDate = userDetailsDao.findDetail(user.getId(), PasswordResetTokenExpiryDate); if (resetTokenDetail == null || resetTokenExpiryDate == null) { - logger.debug(String.format( - "Failed to reset password. No reset token found for user id: %d username: %s account" + - " id: %d domain id: %d", - user.getId(), user.getUsername(), user.getAccountId(), user.getDomainId())); + logger.debug("Failed to reset password. No reset token found for user {} account" + + " id: {} domain id: {}", user, user.getAccountId(), user.getDomainId()); throw new ServerApiException(ApiErrorCode.PARAM_ERROR, String.format("No reset token found for user %s", user.getUsername())); } @@ -217,31 +212,23 @@ public boolean validateAndResetPassword(UserAccount user, String token, String p Date now = new Date(); String resetToken = resetTokenDetail.getValue(); if (StringUtils.isEmpty(resetToken)) { - logger.debug(String.format( - "Failed to reset password. No reset token found for user id: %d username: %s account" + - " id: %d domain id: %d", - user.getId(), user.getUsername(), user.getAccountId(), user.getDomainId())); + logger.debug("Failed to reset password. 
No reset token found for user {} account" + + " id: {} domain id: {}", user, user.getAccountId(), user.getDomainId()); throw new ServerApiException(ApiErrorCode.PARAM_ERROR, String.format("No reset token found for user %s", user.getUsername())); } if (!resetToken.equals(token)) { - logger.debug(String.format( - "Failed to reset password. Invalid reset token for user id: %d username: %s " + - "account id: %d domain id: %d", - user.getId(), user.getUsername(), user.getAccountId(), user.getDomainId())); + logger.debug("Failed to reset password. Invalid reset token for user {} " + + "account id: {} domain id: {}", user, user.getAccountId(), user.getDomainId()); throw new ServerApiException(ApiErrorCode.PARAM_ERROR, String.format("Invalid reset token for user %s", user.getUsername())); } if (now.after(resetTokenExpiryTime)) { - logger.debug(String.format( - "Failed to reset password. Reset token has expired for user id: %d username: %s " + - "account id: %d domain id: %d", - user.getId(), user.getUsername(), user.getAccountId(), user.getDomainId())); + logger.debug("Failed to reset password. 
Reset token has expired for user {} " + + "account id: {} domain id: {}", user, user.getAccountId(), user.getDomainId()); throw new ServerApiException(ApiErrorCode.PARAM_ERROR, String.format("Reset token has expired for user %s", user.getUsername())); } resetPassword(user, password); - logger.debug(String.format( - "Password reset successful for user id: %d username: %s account id: %d domain id: %d", - user.getId(), user.getUsername(), user.getAccountId(), user.getDomainId())); + logger.debug("Password reset successful for user {} account id: {} domain id: {}", user, user.getAccountId(), user.getDomainId()); return true; } diff --git a/server/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduleManagerImpl.java b/server/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduleManagerImpl.java index 2898fd5d0f36..0a8f3b99a099 100644 --- a/server/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduleManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduleManagerImpl.java @@ -115,7 +115,7 @@ public VMScheduleResponse createSchedule(CreateVMScheduleCmd cmd) { description = String.format("%s - %s", action, DateUtil.getHumanReadableSchedule(cronExpression)); } else description = cmd.getDescription(); - logger.warn(String.format("Using timezone [%s] for running the schedule for VM [%s], as an equivalent of [%s].", timeZoneId, vm.getUuid(), cmdTimeZone)); + logger.warn("Using timezone [{}] for running the schedule for VM [{}], as an equivalent of [{}].", timeZoneId, vm, cmdTimeZone); String finalDescription = description; VMSchedule.Action finalAction = action; @@ -212,8 +212,8 @@ public VMScheduleResponse updateSchedule(UpdateVMScheduleCmd cmd) { timeZone = TimeZone.getTimeZone(cmdTimeZone); timeZoneId = timeZone.getID(); if (!timeZoneId.equals(cmdTimeZone)) { - logger.warn(String.format("Using timezone [%s] for running the schedule [%s] for VM %s, as an equivalent of [%s].", - timeZoneId, vmSchedule.getSchedule(), 
vmSchedule.getVmId(), cmdTimeZone)); + logger.warn("Using timezone [{}] for running the schedule [{}] for VM {}, as an equivalent of [{}].", + timeZoneId, vmSchedule.getSchedule(), userVmManager.getUserVm(vmSchedule.getVmId()), cmdTimeZone); } vmSchedule.setTimeZone(timeZoneId); } else { diff --git a/server/src/main/java/org/apache/cloudstack/vm/schedule/VMSchedulerImpl.java b/server/src/main/java/org/apache/cloudstack/vm/schedule/VMSchedulerImpl.java index 139a4d0be1f4..7410fb1c2655 100644 --- a/server/src/main/java/org/apache/cloudstack/vm/schedule/VMSchedulerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/vm/schedule/VMSchedulerImpl.java @@ -112,7 +112,8 @@ public void updateScheduledJob(VMScheduleVO vmSchedule) { @Override public Date scheduleNextJob(VMScheduleVO vmSchedule, Date timestamp) { if (!vmSchedule.getEnabled()) { - logger.debug(String.format("VM Schedule [id=%s] for VM [id=%s] is disabled. Not scheduling next job.", vmSchedule.getUuid(), vmSchedule.getVmId())); + logger.debug("VM Schedule {} for VM {} with id {} is disabled. Not scheduling next job.", + vmSchedule::toString, () -> userVmManager.getUserVm(vmSchedule.getVmId()), vmSchedule::getVmId); return null; } @@ -122,7 +123,7 @@ public Date scheduleNextJob(VMScheduleVO vmSchedule, Date timestamp) { VirtualMachine vm = userVmManager.getUserVm(vmSchedule.getVmId()); if (vm == null) { - logger.info(String.format("VM [id=%s] is removed. Disabling VM schedule [id=%s].", vmSchedule.getVmId(), vmSchedule.getUuid())); + logger.info("VM id={} is removed. Disabling VM schedule {}.", vmSchedule.getVmId(), vmSchedule); vmSchedule.setEnabled(false); vmScheduleDao.persist(vmSchedule); return null; @@ -140,7 +141,7 @@ public Date scheduleNextJob(VMScheduleVO vmSchedule, Date timestamp) { zonedEndDate = ZonedDateTime.ofInstant(endDate.toInstant(), vmSchedule.getTimeZoneId()); } if (zonedEndDate != null && now.isAfter(zonedEndDate)) { - logger.info(String.format("End time is less than current time. 
Disabling VM schedule [id=%s] for VM [id=%s].", vmSchedule.getUuid(), vmSchedule.getVmId())); + logger.info("End time is less than current time. Disabling VM schedule {} for VM {}.", vmSchedule, vm); vmSchedule.setEnabled(false); vmScheduleDao.persist(vmSchedule); return null; @@ -154,7 +155,7 @@ public Date scheduleNextJob(VMScheduleVO vmSchedule, Date timestamp) { } if (ts == null) { - logger.info(String.format("No next schedule found. Disabling VM schedule [id=%s] for VM [id=%s].", vmSchedule.getUuid(), vmSchedule.getVmId())); + logger.info("No next schedule found. Disabling VM schedule {} for VM {}.", vmSchedule, vm); vmSchedule.setEnabled(false); vmScheduleDao.persist(vmSchedule); return null; @@ -165,7 +166,7 @@ public Date scheduleNextJob(VMScheduleVO vmSchedule, Date timestamp) { try { vmScheduledJobDao.persist(scheduledJob); ActionEventUtils.onScheduledActionEvent(User.UID_SYSTEM, vm.getAccountId(), actionEventMap.get(vmSchedule.getAction()), - String.format("Scheduled action (%s) [vmId: %s scheduleId: %s] at %s", vmSchedule.getAction(), vm.getUuid(), vmSchedule.getUuid(), scheduledDateTime), + String.format("Scheduled action (%s) [vm: %s, schedule: %s] at %s", vmSchedule.getAction(), vm, vmSchedule, scheduledDateTime), vm.getId(), ApiCommandResourceType.VirtualMachine.toString(), true, 0); } catch (EntityExistsException exception) { logger.debug("Job is already scheduled."); @@ -246,7 +247,7 @@ private void scheduleNextJobs(Date timestamp) { try { scheduleNextJob(schedule, timestamp); } catch (Exception e) { - logger.warn("Error in scheduling next job for schedule " + schedule.getUuid(), e); + logger.warn("Error in scheduling next job for schedule {}", schedule, e); } } } @@ -272,7 +273,8 @@ void executeJobs(Map jobsToExecute) { if (logger.isDebugEnabled()) { final Date scheduledTimestamp = vmScheduledJob.getScheduledTime(); displayTime = DateUtil.displayDateInTimezone(DateUtil.GMT_TIMEZONE, scheduledTimestamp); - logger.debug(String.format("Executing %s 
for VM id %d for schedule id: %d at %s", vmScheduledJob.getAction(), vmScheduledJob.getVmId(), vmScheduledJob.getVmScheduleId(), displayTime)); + logger.debug("Executing {} for VM {} for scheduled job: {} at {}", + vmScheduledJob.getAction(), vm, vmScheduledJob, displayTime); } tmpVMScheduleJob = vmScheduledJobDao.acquireInLockTable(vmScheduledJob.getId()); @@ -282,7 +284,7 @@ void executeJobs(Map jobsToExecute) { vmScheduledJobDao.update(vmScheduledJob.getId(), tmpVMScheduleJob); } } catch (final Exception e) { - logger.warn(String.format("Executing scheduled job id: %s failed due to %s", vmScheduledJob.getId(), e)); + logger.warn("Executing scheduled job {} failed due to {}", vmScheduledJob, e); } finally { if (tmpVMScheduleJob != null) { vmScheduledJobDao.releaseFromLockTable(vmScheduledJob.getId()); @@ -293,13 +295,14 @@ void executeJobs(Map jobsToExecute) { Long processJob(VMScheduledJob vmScheduledJob, VirtualMachine vm) { if (!Arrays.asList(VirtualMachine.State.Running, VirtualMachine.State.Stopped).contains(vm.getState())) { - logger.info(String.format("Skipping action (%s) for [vmId:%s scheduleId: %s] because VM is invalid state: %s", vmScheduledJob.getAction(), vm.getUuid(), vmScheduledJob.getVmScheduleId(), vm.getState())); + logger.info("Skipping action ({}) for [vm: {}, scheduled job: {}] because VM is invalid state: {}", + vmScheduledJob.getAction(), vm, vmScheduledJob, vm.getState()); return null; } final Long eventId = ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, vm.getAccountId(), null, actionEventMap.get(vmScheduledJob.getAction()), true, - String.format("Executing action (%s) for VM Id:%s", vmScheduledJob.getAction(), vm.getUuid()), + String.format("Executing action (%s) for VM: %s", vmScheduledJob.getAction(), vm), vm.getId(), ApiCommandResourceType.VirtualMachine.toString(), 0); if (vm.getState() == VirtualMachine.State.Running) { @@ -317,8 +320,8 @@ Long processJob(VMScheduledJob vmScheduledJob, VirtualMachine vm) { return 
executeStartVMJob(vm, eventId); } - logger.warn(String.format("Skipping action (%s) for [vmId:%s scheduleId: %s] because VM is in state: %s", - vmScheduledJob.getAction(), vm.getUuid(), vmScheduledJob.getVmScheduleId(), vm.getState())); + logger.warn("Skipping action ({}) for [vm: {}, scheduled job: {}] because VM is in state: {}", + vmScheduledJob.getAction(), vm, vmScheduledJob, vm.getState()); return null; } @@ -329,7 +332,8 @@ private void skipJobs(Map jobsToExecute, Map(new Pair<>(List.of(destHost), 1), List.of(destHost), Map.of(destHost, false))); Mockito.when(managementServer.listHostsForMigrationOfVM(vm2, 0L, 500L, null, vmList)).thenReturn( new Ternary<>(new Pair<>(List.of(destHost), 1), List.of(destHost), Map.of(destHost, false))); - Mockito.when(balancedAlgorithm.getMetrics(cluster.getId(), vm1, serviceOffering, destHost, new HashMap<>(), + Mockito.when(balancedAlgorithm.getMetrics(cluster, vm1, serviceOffering, destHost, new HashMap<>(), new HashMap<>(), false)).thenReturn(new Ternary<>(1.0, 0.5, 1.5)); - Mockito.when(balancedAlgorithm.getMetrics(cluster.getId(), vm2, serviceOffering, destHost, new HashMap<>(), + Mockito.when(balancedAlgorithm.getMetrics(cluster, vm2, serviceOffering, destHost, new HashMap<>(), new HashMap<>(), false)).thenReturn(new Ternary<>(1.0, 2.5, 1.5)); Pair bestMigration = clusterDrsService.getBestMigration(cluster, balancedAlgorithm, diff --git a/server/src/test/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelperTest.java b/server/src/test/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelperTest.java index d7684b824e3e..272e79fea490 100644 --- a/server/src/test/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelperTest.java +++ b/server/src/test/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelperTest.java @@ -76,8 +76,6 @@ public void getImageStoreIfThereIsHeuristicRuleTestZoneDoesNotHaveHeuristicRuleS DataStore result = 
heuristicRuleHelperSpy.getImageStoreIfThereIsHeuristicRule(zoneId, HeuristicType.TEMPLATE, null); - Mockito.verify(loggerMock, Mockito.times(1)).debug(String.format("No heuristic rules found for zone with ID [%s] and heuristic type [%s]. Returning null.", - zoneId, HeuristicType.TEMPLATE)); Assert.assertNull(result); } @@ -92,7 +90,6 @@ public void getImageStoreIfThereIsHeuristicRuleTestZoneHasHeuristicRuleShouldCal DataStore result = heuristicRuleHelperSpy.getImageStoreIfThereIsHeuristicRule(zoneId, HeuristicType.TEMPLATE, null); - Mockito.verify(loggerMock, Mockito.times(1)).debug(String.format("Found the heuristic rule %s to apply for zone with ID [%s].", heuristicVOMock, zoneId)); Assert.assertNull(result); } diff --git a/server/src/test/java/org/apache/cloudstack/storage/volume/VolumeImportUnmanageManagerImplTest.java b/server/src/test/java/org/apache/cloudstack/storage/volume/VolumeImportUnmanageManagerImplTest.java index dab465954381..8982034a8c58 100644 --- a/server/src/test/java/org/apache/cloudstack/storage/volume/VolumeImportUnmanageManagerImplTest.java +++ b/server/src/test/java/org/apache/cloudstack/storage/volume/VolumeImportUnmanageManagerImplTest.java @@ -396,7 +396,7 @@ public void testUnmanageVolumeNotReady() { volumeImportUnmanageManager.unmanageVolume(volumeId); Assert.fail("it should fail"); } catch (CloudRuntimeException ex) { - verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Volume (ID: %s) is not ready", volumeId)); + verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Volume %s is not ready", volumeVO)); } } @@ -409,7 +409,7 @@ public void testUnmanageVolumeEncrypted() { volumeImportUnmanageManager.unmanageVolume(volumeId); Assert.fail("it should fail"); } catch (CloudRuntimeException ex) { - verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Volume (ID: %s) is encrypted", volumeId)); + 
verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Volume %s is encrypted", volumeVO)); } } @@ -421,7 +421,7 @@ public void testUnmanageVolumeAttached() { volumeImportUnmanageManager.unmanageVolume(volumeId); Assert.fail("it should fail"); } catch (CloudRuntimeException ex) { - verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Volume (ID: %s) is attached to VM (ID: %s)", volumeId, volumeVO.getInstanceId())); + verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Volume %s is attached to VM (ID: %s)", volumeVO, volumeVO.getInstanceId())); } } @@ -444,7 +444,7 @@ public void testCheckIfPoolAvailableInMaintenance() { volumeImportUnmanageManager.checkIfPoolAvailable(poolId); Assert.fail("it should fail"); } catch (CloudRuntimeException ex) { - verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Storage pool (name: %s) is in maintenance", storagePoolName)); + verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Storage pool %s is in maintenance", storagePoolVO)); } } @@ -457,7 +457,7 @@ public void testCheckIfPoolAvailableDisabled() { volumeImportUnmanageManager.checkIfPoolAvailable(poolId); Assert.fail("it should fail"); } catch (CloudRuntimeException ex) { - verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Storage pool (ID: %s) is not Up: %s", storagePoolName, StoragePoolStatus.Disabled)); + verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Storage pool %s is not Up: %s", storagePoolVO, StoragePoolStatus.Disabled)); } } @@ -535,7 +535,7 @@ public void testGetOrCreateDiskOfferingNotActive() { volumeImportUnmanageManager.getOrCreateDiskOffering(account, diskOfferingId, zoneId, isLocal); Assert.fail("it should fail"); } catch (CloudRuntimeException ex) { - verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Disk offering with ID %s is not active", 
diskOfferingId)); + verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Disk offering %s is not active", diskOfferingVO)); } } @@ -549,7 +549,7 @@ public void testGetOrCreateDiskOfferingNotLocal() { volumeImportUnmanageManager.getOrCreateDiskOffering(account, diskOfferingId, zoneId, isLocal); Assert.fail("it should fail"); } catch (CloudRuntimeException ex) { - verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Disk offering with ID %s should use %s storage", diskOfferingId, isLocal ? "local" : "shared")); + verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Disk offering %s should use %s storage", diskOfferingVO, isLocal ? "local" : "shared")); } } @@ -564,7 +564,7 @@ public void testGetOrCreateDiskOfferingForVolumeEncryption() { volumeImportUnmanageManager.getOrCreateDiskOffering(account, diskOfferingId, zoneId, isLocal); Assert.fail("it should fail"); } catch (CloudRuntimeException ex) { - verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Disk offering with ID %s should not support volume encryption", diskOfferingId)); + verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Disk offering %s should not support volume encryption", diskOfferingVO)); } } @@ -579,7 +579,7 @@ public void testGetOrCreateDiskOfferingNoPermission() { volumeImportUnmanageManager.getOrCreateDiskOffering(account, diskOfferingId, zoneId, isLocal); Assert.fail("it should fail"); } catch (CloudRuntimeException ex) { - verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Disk offering with ID %s is not accessible by owner %s", diskOfferingId, account)); + verify(volumeImportUnmanageManager).logFailureAndThrowException(String.format("Disk offering %s is not accessible by owner %s", diskOfferingVO, account)); } } diff --git a/server/src/test/java/org/apache/cloudstack/vm/schedule/VMSchedulerImplTest.java 
b/server/src/test/java/org/apache/cloudstack/vm/schedule/VMSchedulerImplTest.java index cad36b962ac2..c51f07e96f7e 100644 --- a/server/src/test/java/org/apache/cloudstack/vm/schedule/VMSchedulerImplTest.java +++ b/server/src/test/java/org/apache/cloudstack/vm/schedule/VMSchedulerImplTest.java @@ -128,7 +128,7 @@ private void executeProcessJobWithVMStateAndActionNonSkipped(VirtualMachine.Stat actionEventUtilsMocked.verify(() -> ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, vm.getAccountId(), null, actionEventMap.get(action), true, - String.format("Executing action (%s) for VM Id:%s", vmScheduledJob.getAction(), vm.getUuid()), + String.format("Executing action (%s) for VM: %s", vmScheduledJob.getAction(), vm), vm.getId(), ApiCommandResourceType.VirtualMachine.toString(), 0)); Assert.assertEquals(expectedValue, jobId); } diff --git a/usage/src/main/java/com/cloud/usage/parser/BucketUsageParser.java b/usage/src/main/java/com/cloud/usage/parser/BucketUsageParser.java index 9617591ae76f..e8686e95ea00 100644 --- a/usage/src/main/java/com/cloud/usage/parser/BucketUsageParser.java +++ b/usage/src/main/java/com/cloud/usage/parser/BucketUsageParser.java @@ -52,7 +52,7 @@ void init() { public static boolean parse(AccountVO account, Date startDate, Date endDate) { if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Parsing all Bucket usage events for account: " + account.getId()); + LOGGER.debug("Parsing all Bucket usage events for account {}", account); } if ((endDate == null) || endDate.after(new Date())) { diff --git a/usage/src/main/java/com/cloud/usage/parser/NetworksUsageParser.java b/usage/src/main/java/com/cloud/usage/parser/NetworksUsageParser.java index a27e2ba226e4..39a51e8554f8 100644 --- a/usage/src/main/java/com/cloud/usage/parser/NetworksUsageParser.java +++ b/usage/src/main/java/com/cloud/usage/parser/NetworksUsageParser.java @@ -53,7 +53,7 @@ void init() { } public static boolean parse(AccountVO account, Date startDate, Date endDate) { - 
LOGGER.debug(String.format("Parsing all networks usage events for account [%s].", account.getId())); + LOGGER.debug("Parsing all networks usage events for account {}", account); if ((endDate == null) || endDate.after(new Date())) { endDate = new Date(); } @@ -84,7 +84,7 @@ public static boolean parse(AccountVO account, Date startDate, Date endDate) { long networkId = usageNetwork.getNetworkId(); long networkOfferingId = usageNetwork.getNetworkOfferingId(); LOGGER.debug(String.format("Creating network usage record with id [%s], network offering [%s], usage [%s], startDate [%s], and endDate [%s], for account [%s].", - networkId, networkOfferingId, usageDisplay, startDate, endDate, account.getId())); + networkId, networkOfferingId, usageDisplay, startDate, endDate, account)); String description = String.format("Network usage for network ID: %d, network offering: %d", usageNetwork.getNetworkId(), usageNetwork.getNetworkOfferingId()); UsageVO usageRecord = diff --git a/usage/src/main/java/com/cloud/usage/parser/VpcUsageParser.java b/usage/src/main/java/com/cloud/usage/parser/VpcUsageParser.java index 5dcb5d08a6cb..794e066c8974 100644 --- a/usage/src/main/java/com/cloud/usage/parser/VpcUsageParser.java +++ b/usage/src/main/java/com/cloud/usage/parser/VpcUsageParser.java @@ -50,7 +50,7 @@ void init() { } public static boolean parse(AccountVO account, Date startDate, Date endDate) { - LOGGER.debug(String.format("Parsing all VPC usage events for account [%s].", account.getId())); + LOGGER.debug("Parsing all VPC usage events for account {}", account); if ((endDate == null) || endDate.after(new Date())) { endDate = new Date(); } @@ -79,8 +79,8 @@ public static boolean parse(AccountVO account, Date startDate, Date endDate) { String usageDisplay = dFormat.format(usage); long vpcId = usageVPC.getVpcId(); - LOGGER.debug(String.format("Creating VPC usage record with id [%s], usage [%s], startDate [%s], and endDate [%s], for account [%s].", - vpcId, usageDisplay, startDate, 
endDate, account.getId())); + LOGGER.debug("Creating VPC usage record with id [{}], usage [{}], startDate [{}], and endDate [{}], for account [{}].", + vpcId, usageDisplay, startDate, endDate, account); String description = String.format("VPC usage for VPC ID: %d", usageVPC.getVpcId()); UsageVO usageRecord = From 499df2dcb9c34de46cf8d54984acb9877e10bb03 Mon Sep 17 00:00:00 2001 From: Vishesh Date: Fri, 27 Dec 2024 15:08:06 +0530 Subject: [PATCH 18/22] Fixups --- .../src/main/java/com/cloud/agent/Agent.java | 23 +++++++-------- .../java/com/cloud/agent/api/to/NfsTO.java | 8 +++++ .../java/com/cloud/agent/api/to/S3TO.java | 8 +++++ .../java/com/cloud/agent/api/to/SwiftTO.java | 8 +++++ .../com/cloud/network/NetworkProfile.java | 5 +++- .../CreateSnapshotFromVMSnapshotCmd.java | 4 +-- .../cloudstack/vm/UnmanagedInstanceTO.java | 28 +++++++----------- .../agent/api/to/LoadBalancerTOTest.java | 16 +++++----- .../cloudstack/storage/to/ImageStoreTO.java | 13 +++------ .../storage/to/PrimaryDataStoreTO.java | 5 +++- .../storage/to/TemplateObjectTO.java | 5 +++- .../cloudstack/storage/to/VolumeObjectTO.java | 5 +++- .../com/cloud/agent/manager/AgentAttache.java | 9 +++--- .../cloud/vm/VirtualMachineManagerImpl.java | 2 +- .../entity/api/db/EngineClusterVO.java | 2 +- .../entity/api/db/EngineDataCenterVO.java | 2 +- .../entity/api/db/EngineHostPodVO.java | 2 +- .../entity/api/db/EngineHostVO.java | 2 +- .../orchestration/StorageOrchestrator.java | 4 +-- .../src/main/java/com/cloud/dc/HostPodVO.java | 2 +- .../src/main/java/com/cloud/dc/VlanVO.java | 29 ++++--------------- .../main/java/com/cloud/domain/DomainVO.java | 2 +- .../src/main/java/com/cloud/host/HostVO.java | 2 +- .../cloud/network/as/AutoScalePolicyVO.java | 2 +- .../cloud/network/as/AutoScaleVmGroupVO.java | 10 +++---- .../network/as/AutoScaleVmProfileVO.java | 4 +-- .../java/com/cloud/network/as/CounterVO.java | 2 +- .../dao/PhysicalNetworkServiceProviderVO.java | 2 +- 
.../cloud/network/dao/PhysicalNetworkVO.java | 4 +-- .../cloud/network/rules/FirewallRuleVO.java | 2 +- .../cloud/network/vpc/NetworkACLItemVO.java | 2 +- .../com/cloud/network/vpc/VpcOfferingVO.java | 2 +- .../cloud/offerings/NetworkOfferingVO.java | 2 +- .../cloud/projects/ProjectInvitationVO.java | 2 +- .../java/com/cloud/projects/ProjectVO.java | 2 +- .../com/cloud/storage/DiskOfferingVO.java | 2 +- .../java/com/cloud/storage/SnapshotVO.java | 8 ++--- .../main/java/com/cloud/storage/VolumeVO.java | 3 +- .../com/cloud/vm/dao/NicSecondaryIpVO.java | 2 +- .../org/apache/cloudstack/acl/RoleVO.java | 3 +- .../cloud/entity/api/db/VMEntityVO.java | 2 +- .../storage/datastore/db/ImageStoreVO.java | 2 +- .../storage/datastore/db/StoragePoolVO.java | 2 +- .../storage/image/store/TemplateObject.java | 8 ++--- .../snapshot/DefaultSnapshotStrategy.java | 2 +- .../storage/snapshot/SnapshotObject.java | 8 ++--- .../datastore/PrimaryDataStoreImpl.java | 4 +-- .../storage/volume/VolumeObject.java | 7 ++--- .../cloud/cluster/ManagementServerHostVO.java | 2 +- .../framework/jobs/impl/AsyncJobVO.java | 28 ++++-------------- .../affinity/ExplicitDedicationProcessor.java | 2 +- .../baremetal/manager/BareMetalPlanner.java | 26 ++++++++--------- .../kvm/storage/LibvirtStoragePool.java | 5 ++-- .../cloud/network/BigSwitchBcfDeviceVO.java | 2 +- .../network/element/BrocadeVcsElement.java | 4 +-- .../network/cisco/CiscoVnmcControllerVO.java | 4 +-- .../element/ElasticLoadBalancerElement.java | 2 +- .../lb/ElasticLoadBalancerManagerImpl.java | 2 +- .../network/lb/LoadBalanceRuleHandler.java | 2 +- .../network/element/NetscalerElement.java | 2 +- .../network/vm/NetScalerVMManagerImpl.java | 2 +- .../ElastistorPrimaryDataStoreLifeCycle.java | 4 +-- .../provider/SolidFireHostListener.java | 2 +- .../provider/StorPoolHostListener.java | 2 +- .../com/cloud/dc/DedicatedResourceVO.java | 2 +- .../java/com/cloud/hypervisor/KVMGuru.java | 2 +- .../com/cloud/network/NetworkModelImpl.java | 4 +-- 
.../network/as/AutoScaleManagerImpl.java | 2 +- .../lb/LoadBalancingRulesManagerImpl.java | 2 +- .../cloud/server/ManagementServerImpl.java | 12 ++++---- .../java/com/cloud/server/StatsCollector.java | 2 +- .../cloud/storage/VolumeApiServiceImpl.java | 2 +- .../cloud/tags/TaggedResourceManagerImpl.java | 7 ++--- .../cloud/template/TemplateAdapterBase.java | 2 +- .../diagnostics/to/DiagnosticsDataObject.java | 8 ++--- .../provider/host/HAAbstractHostProvider.java | 10 +++---- .../PowerOperationTask.java | 2 +- 77 files changed, 202 insertions(+), 221 deletions(-) diff --git a/agent/src/main/java/com/cloud/agent/Agent.java b/agent/src/main/java/com/cloud/agent/Agent.java index d760897fbeae..c84179d66609 100644 --- a/agent/src/main/java/com/cloud/agent/Agent.java +++ b/agent/src/main/java/com/cloud/agent/Agent.java @@ -132,8 +132,8 @@ public int value() { ServerResource _resource; Link _link; Long _id; - String uuid; - String name; + String _uuid; + String _name; Timer _timer = new Timer("Agent Timer"); Timer certTimer; @@ -184,10 +184,10 @@ public Agent(final IAgentShell shell, final int localAgentId, final ServerResour resource.setAgentControl(this); final String value = _shell.getPersistentProperty(getResourceName(), "id"); - uuid = _shell.getPersistentProperty(getResourceName(), "uuid"); - name = _shell.getPersistentProperty(getResourceName(), "name"); + _uuid = _shell.getPersistentProperty(getResourceName(), "uuid"); + _name = _shell.getPersistentProperty(getResourceName(), "name"); _id = value != null ? 
Long.parseLong(value) : null; - logger.info("Initialising agent [id: {}, uuid: {}, name: {}]", ObjectUtils.defaultIfNull(_id, ""), uuid, name); + logger.info("Initialising agent [id: {}, uuid: {}, name: {}]", ObjectUtils.defaultIfNull(_id, ""), _uuid, _name); final Map params = new HashMap<>(); @@ -217,7 +217,7 @@ public Agent(final IAgentShell shell, final int localAgentId, final ServerResour "agentRequest-Handler")); logger.info("Agent [id = {}, uuid: {}, name: {}] : type = {} : zone = {} : pod = {} : workers = {} : host = {} : port = {}", - ObjectUtils.defaultIfNull(_id, "new"), uuid, name, getResourceName(), + ObjectUtils.defaultIfNull(_id, "new"), _uuid, _name, getResourceName(), _shell.getZone(), _shell.getPod(), _shell.getWorkers(), host, _shell.getPort()); } @@ -382,28 +382,25 @@ public Long getId() { } public void setId(final Long id) { - logger.debug("Set agent id {}", id); _id = id; _shell.setPersistentProperty(getResourceName(), "id", Long.toString(id)); } public String getUuid() { - return uuid; + return _uuid; } public void setUuid(String uuid) { - logger.debug("Set agent uuid {}", uuid); - this.uuid = uuid; + this._uuid = uuid; _shell.setPersistentProperty(getResourceName(), "uuid", uuid); } public String getName() { - return name; + return _name; } public void setName(String name) { - logger.debug("Set agent name {}", name); - this.name = name; + this._name = name; _shell.setPersistentProperty(getResourceName(), "name", name); } diff --git a/api/src/main/java/com/cloud/agent/api/to/NfsTO.java b/api/src/main/java/com/cloud/agent/api/to/NfsTO.java index 0f6511e83114..eeddbf649a77 100644 --- a/api/src/main/java/com/cloud/agent/api/to/NfsTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/NfsTO.java @@ -17,6 +17,7 @@ package com.cloud.agent.api.to; import com.cloud.storage.DataStoreRole; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class NfsTO implements DataStoreTO { @@ -41,6 +42,13 @@ 
public NfsTO(String url, DataStoreRole role) { } + @Override + public String toString() { + return String.format("NfsTO %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "uuid", "_url", "_role", "nfsVersion")); + } + @Override public String getUrl() { return _url; diff --git a/api/src/main/java/com/cloud/agent/api/to/S3TO.java b/api/src/main/java/com/cloud/agent/api/to/S3TO.java index 233238cf793d..936f8168b1e8 100644 --- a/api/src/main/java/com/cloud/agent/api/to/S3TO.java +++ b/api/src/main/java/com/cloud/agent/api/to/S3TO.java @@ -22,6 +22,7 @@ import com.cloud.agent.api.LogLevel.Log4jLevel; import com.cloud.storage.DataStoreRole; import com.cloud.utils.storage.S3.ClientOptions; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public final class S3TO implements ClientOptions, DataStoreTO { @@ -68,6 +69,13 @@ public S3TO(final Long id, final String uuid, final String accessKey, final Stri } + @Override + public String toString() { + return String.format("S3TO %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "bucketName")); + } + public Long getId() { return this.id; } diff --git a/api/src/main/java/com/cloud/agent/api/to/SwiftTO.java b/api/src/main/java/com/cloud/agent/api/to/SwiftTO.java index b89dfea40e0c..14038566fbd3 100644 --- a/api/src/main/java/com/cloud/agent/api/to/SwiftTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/SwiftTO.java @@ -18,6 +18,7 @@ import com.cloud.storage.DataStoreRole; import com.cloud.utils.SwiftUtil; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class SwiftTO implements DataStoreTO, SwiftUtil.SwiftClientCfg { Long id; @@ -41,6 +42,13 @@ public SwiftTO(Long id, String url, String account, String userName, String key, this.storagePolicy = storagePolicy; } + @Override + public String toString() { + return String.format("SwiftTO %s", + 
ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "account", "userName")); + } + public Long getId() { return id; } diff --git a/api/src/main/java/com/cloud/network/NetworkProfile.java b/api/src/main/java/com/cloud/network/NetworkProfile.java index 641c67a39daf..2e8efb489308 100644 --- a/api/src/main/java/com/cloud/network/NetworkProfile.java +++ b/api/src/main/java/com/cloud/network/NetworkProfile.java @@ -22,6 +22,7 @@ import com.cloud.network.Networks.BroadcastDomainType; import com.cloud.network.Networks.Mode; import com.cloud.network.Networks.TrafficType; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class NetworkProfile implements Network { private final long id; @@ -386,7 +387,9 @@ public Integer getNetworkCidrSize() { @Override public String toString() { - return String.format("NetworkProfile {\"id\": %s, \"name\": \"%s\", \"uuid\": \"%s\", \"networkofferingid\": %d}", id, name, uuid, networkOfferingId); + return String.format("NetworkProfile %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "networkOfferingId")); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotFromVMSnapshotCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotFromVMSnapshotCmd.java index f281fbaea2e3..cdd908dfb87d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotFromVMSnapshotCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotFromVMSnapshotCmd.java @@ -165,8 +165,8 @@ public void create() throws ResourceAllocationException { @Override public void execute() { VMSnapshot vmSnapshot = _vmSnapshotService.getVMSnapshotById(getVMSnapshotId()); - logger.info("CreateSnapshotFromVMSnapshotCmd with vm snapshot {} with id {} and snapshot [id: {}, uuid: {}] starts: {}", vmSnapshot, getVMSnapshotId(), 
getEntityId(), getEntityUuid(), System.currentTimeMillis()); - CallContext.current().setEventDetails("Vm Snapshot Id: "+ vmSnapshot.getUuid()); + logger.info("CreateSnapshotFromVMSnapshotCmd with vm snapshot {} with id {} and snapshot [id: {}, uuid: {}]", vmSnapshot, getVMSnapshotId(), getEntityId(), getEntityUuid()); + CallContext.current().setEventDetails("Vm Snapshot Id: " + vmSnapshot.getUuid()); Snapshot snapshot = null; try { snapshot = _snapshotService.backupSnapshotFromVmSnapshot(getEntityId(), getVmId(), getVolumeId(), getVMSnapshotId()); diff --git a/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java b/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java index 95904483391c..191cba9ed1c3 100644 --- a/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java +++ b/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java @@ -17,6 +17,8 @@ package org.apache.cloudstack.vm; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; + import static com.cloud.utils.NumbersUtil.toHumanReadableSize; import java.util.List; @@ -181,12 +183,9 @@ public void setVncPassword(String vncPassword) { @Override public String toString() { - return "UnmanagedInstanceTO{" + - "name='" + name + '\'' + - ", internalCSName='" + internalCSName + '\'' + - ", hostName='" + hostName + '\'' + - ", clusterName='" + clusterName + '\'' + - '}'; + return String.format("UnmanagedInstanceTO %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "name", "internalCSName", "hostName", "clusterName")); } public static class Disk { @@ -332,12 +331,9 @@ public int getDatastorePort() { @Override public String toString() { - return "Disk {" + - "diskId='" + diskId + '\'' + - ", capacity=" + toHumanReadableSize(capacity) + - ", controller='" + controller + '\'' + - ", controllerUnit=" + controllerUnit + - "}"; + return String.format("Disk %s", + 
ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "diskId", "internalCSName", "controller", "controllerUnit")); } } @@ -434,11 +430,9 @@ public void setPciSlot(String pciSlot) { @Override public String toString() { - return "Nic{" + - "nicId='" + nicId + '\'' + - ", adapterType='" + adapterType + '\'' + - ", macAddress='" + macAddress + '\'' + - "}"; + return String.format("Nic %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "nicId", "adapterType", "macAddress")); } } } diff --git a/api/src/test/java/com/cloud/agent/api/to/LoadBalancerTOTest.java b/api/src/test/java/com/cloud/agent/api/to/LoadBalancerTOTest.java index 8fef19ef3b62..e7ecbebae7bd 100644 --- a/api/src/test/java/com/cloud/agent/api/to/LoadBalancerTOTest.java +++ b/api/src/test/java/com/cloud/agent/api/to/LoadBalancerTOTest.java @@ -41,19 +41,19 @@ public class LoadBalancerTOTest { LoadBalancerTO.AutoScaleVmGroupTO vmGroup; private static final Long counterId = 1L; - private static final String counterUiid = "1111-1111-1100"; + private static final String counterUuid = "1111-1111-1100"; private static final String counterName = "counter name"; private static final Counter.Source counterSource = Counter.Source.CPU; private static final String counterValue = "counter value"; private static final String counterProvider = "VIRTUALROUTER"; private static final Long conditionId = 2L; - private static final String conditionUiid = "1111-1111-1110"; + private static final String conditionUuid = "1111-1111-1110"; private static final Long threshold = 100L; private static final Condition.Operator relationalOperator = Condition.Operator.GT; private static final Long scaleUpPolicyId = 11L; - private static final String scaleUpPolicyUiid = "1111-1111-1111"; + private static final String scaleUpPolicyUuid = "1111-1111-1111"; private static final int scaleUpPolicyDuration = 61; private static final int scaleUpPolicyQuietTime = 31; private static final Date 
scaleUpPolicyLastQuietTime = new Date(); @@ -88,12 +88,12 @@ public class LoadBalancerTOTest { @Before public void setUp() { - counter = new LoadBalancerTO.CounterTO(counterId, counterUiid, counterName, counterSource, counterValue, counterProvider); - condition = new LoadBalancerTO.ConditionTO(conditionId, conditionUiid, threshold, relationalOperator, counter); - scaleUpPolicy = new LoadBalancerTO.AutoScalePolicyTO(scaleUpPolicyId, scaleUpPolicyUiid, scaleUpPolicyDuration, + counter = new LoadBalancerTO.CounterTO(counterId, counterUuid, counterName, counterSource, counterValue, counterProvider); + condition = new LoadBalancerTO.ConditionTO(conditionId, conditionUuid, threshold, relationalOperator, counter); + scaleUpPolicy = new LoadBalancerTO.AutoScalePolicyTO(scaleUpPolicyId, scaleUpPolicyUuid, scaleUpPolicyDuration, scaleUpPolicyQuietTime, scaleUpPolicyLastQuietTime, AutoScalePolicy.Action.SCALEUP, Arrays.asList(new LoadBalancerTO.ConditionTO[]{ condition }), false); - scaleDownPolicy = new LoadBalancerTO.AutoScalePolicyTO(scaleDownPolicyId, scaleUpPolicyUiid, scaleDownPolicyDuration, + scaleDownPolicy = new LoadBalancerTO.AutoScalePolicyTO(scaleDownPolicyId, scaleUpPolicyUuid, scaleDownPolicyDuration, scaleDownPolicyQuietTime, scaleDownPolicyLastQuietTime, AutoScalePolicy.Action.SCALEDOWN, Arrays.asList(new LoadBalancerTO.ConditionTO[]{ condition }), false); vmProfile = new LoadBalancerTO.AutoScaleVmProfileTO(zoneId, domainId, cloudStackApiUrl, autoScaleUserApiKey, @@ -116,7 +116,7 @@ public void testCounterTO() { @Test public void testConditionTO() { Assert.assertEquals(conditionId, condition.getId()); - Assert.assertEquals(conditionUiid, condition.getUuid()); + Assert.assertEquals(conditionUuid, condition.getUuid()); Assert.assertEquals((long) threshold, condition.getThreshold()); Assert.assertEquals(relationalOperator, condition.getRelationalOperator()); Assert.assertEquals(counter, condition.getCounter()); diff --git 
a/core/src/main/java/org/apache/cloudstack/storage/to/ImageStoreTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/ImageStoreTO.java index 046a2ab9410c..4bf292056732 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/ImageStoreTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/ImageStoreTO.java @@ -23,6 +23,7 @@ import com.cloud.agent.api.to.DataStoreTO; import com.cloud.storage.DataStoreRole; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class ImageStoreTO implements DataStoreTO { private String type; @@ -78,15 +79,9 @@ public DataStoreRole getRole() { @Override public String toString() { - return new StringBuilder("ImageStoreTO[type=").append(type) - .append("|provider=") - .append(providerName) - .append("|role=") - .append(role) - .append("|uri=") - .append(uri) - .append("]") - .toString(); + return String.format("ImageStoreTO %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "uuid", "type", "providerName", "role", "uri")); } @Override diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java index e12c61b3dcf7..2c758fa50874 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java @@ -26,6 +26,7 @@ import com.cloud.agent.api.to.DataStoreTO; import com.cloud.storage.DataStoreRole; import com.cloud.storage.Storage.StoragePoolType; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class PrimaryDataStoreTO implements DataStoreTO { public static final String MANAGED = PrimaryDataStore.MANAGED; @@ -145,7 +146,9 @@ public String getPathSeparator() { @Override public String toString() { - return String.format("PrimaryDataStoreTO[uuid=%s|name=%s|id=%d|pooltype=%s]", uuid, 
name, id, poolType); + return String.format("PrimaryDataStoreTO %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "poolType")); } public Boolean isFullCloneFlag() { diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java index 6b98baf15d40..dc68b31a3fd1 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/TemplateObjectTO.java @@ -27,6 +27,7 @@ import com.cloud.hypervisor.Hypervisor; import com.cloud.storage.Storage.ImageFormat; import com.cloud.template.VirtualMachineTemplate; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class TemplateObjectTO extends DownloadableObjectTO implements DataTO { private String path; @@ -264,6 +265,8 @@ public void setDeployAsIsConfiguration(String deployAsIsConfiguration) { @Override public String toString() { - return String.format("TemplateTO[id=%d|uuid=%s|origUrl=%s|name%s]", id, uuid, origUrl, name); + return String.format("TemplateTO %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "origUrl")); } } diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java index b84f0204f9bd..4d1d0bf90971 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java @@ -30,6 +30,7 @@ import com.cloud.storage.MigrationOptions; import com.cloud.storage.Storage; import com.cloud.storage.Volume; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import java.util.Arrays; @@ -258,7 +259,9 @@ public void setPoolId(Long poolId){ @Override public String toString() { 
- return String.format("volumeTO[id=%s|uuid=%s|name=%s|path=%s|datastore=%s]", id, uuid, name, path, dataStore); + return String.format("volumeTO %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "path", "dataStore")); } public void setBytesReadRate(Long bytesReadRate) { diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java index 806daec324b6..30a58d405c9e 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java @@ -34,6 +34,7 @@ import com.cloud.agent.api.CleanupPersistentNetworkResourceCommand; import org.apache.cloudstack.agent.lb.SetupMSListCommand; import org.apache.cloudstack.managed.context.ManagedContextRunnable; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -149,11 +150,9 @@ protected AgentAttache(final AgentManagerImpl agentMgr, final long id, final Str @Override public String toString() { - return "AgentAttache{" + - "id=" + _id + - ", uuid='" + _uuid + '\'' + - ", name='" + _name + '\'' + - '}'; + return String.format("AgentAttache %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "_id", "_uuid", "_name")); } public synchronized long getNextSequence() { diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 79e6d2a34b36..a8b0130bdbc0 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -5800,7 +5800,7 @@ public Pair findClusterAndHostIdForVm(VirtualMachine vm, boolean ski 
hostId = vm.getHostId(); } Long clusterId = null; - if(hostId == null) { + if (hostId == null) { if (vm.getLastHostId() == null) { return findClusterAndHostIdForVmFromVolumes(vm.getId()); } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineClusterVO.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineClusterVO.java index db90ce0287ec..19b0e773cd01 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineClusterVO.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineClusterVO.java @@ -270,6 +270,6 @@ public PartitionType partitionType() { public String toString() { return String.format("EngineCluster %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "name", "uuid")); + this, "id", "uuid", "name")); } } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineDataCenterVO.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineDataCenterVO.java index 4691dd323042..5f1203c024a4 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineDataCenterVO.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineDataCenterVO.java @@ -529,6 +529,6 @@ public DataCenter.Type getType() { public String toString() { return String.format("EngineDataCenter %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "name", "uuid")); + this, "id", "uuid", "name")); } } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostPodVO.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostPodVO.java index f9fc4421ce5a..95931d5b72d5 100644 --- 
a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostPodVO.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostPodVO.java @@ -252,6 +252,6 @@ public State getState() { public String toString() { return String.format("EngineHostPod %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "name", "uuid")); + this, "id", "uuid", "name")); } } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java index 7c94e9e28891..053d9ac218ee 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/EngineHostVO.java @@ -700,7 +700,7 @@ public boolean equals(Object obj) { public String toString() { return String.format("EngineHost %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "name", "uuid", "type")); + this, "id", "uuid", "name", "type")); } public void setHypervisorType(HypervisorType hypervisorType) { diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java index 830daef0580d..0773c20b6b98 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java @@ -309,9 +309,9 @@ protected Pair migrateCompleted(Long destDatastoreId, DataStore message += "Image stores have been attempted to be balanced"; success = true; } else { - message = String.format("Files not completely migrated from 
%s. Datastore (source): %s " + + message = String.format("Files not completely migrated from %s. Source datastore " + "has equal or more free space than destination. If you want to continue using the Image Store, " + - "please change the read-only status using 'update imagestore' command", srcDatastore, srcDatastore); + "please change the read-only status using 'update imagestore' command", srcDatastore); success = false; } } else { diff --git a/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java b/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java index b0b27933ccee..fdda38fbc393 100644 --- a/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java +++ b/engine/schema/src/main/java/com/cloud/dc/HostPodVO.java @@ -203,6 +203,6 @@ public void setUuid(String uuid) { public String toString() { return String.format("HostPod %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "name", "uuid")); + this, "id", "uuid", "name")); } } diff --git a/engine/schema/src/main/java/com/cloud/dc/VlanVO.java b/engine/schema/src/main/java/com/cloud/dc/VlanVO.java index 9f0f1c6929a3..c271325f3dee 100644 --- a/engine/schema/src/main/java/com/cloud/dc/VlanVO.java +++ b/engine/schema/src/main/java/com/cloud/dc/VlanVO.java @@ -29,6 +29,7 @@ import javax.persistence.Table; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "vlan") @@ -192,29 +193,11 @@ public void setPhysicalNetworkId(Long physicalNetworkId) { @Override public String toString() { if (toString == null) { - toString = - new StringBuilder("Vlan[") - .append(id) - .append("|") - .append(uuid) - .append("|") - .append(vlanTag) - .append("|") - .append(vlanGateway) - .append("|") - .append(vlanNetmask) - .append("|") - .append(ip6Gateway) - .append("|") - .append(ip6Cidr) - .append("|") - .append(ipRange) - .append("|") - .append(ip6Range) - .append("|") - .append(networkId) - .append("]") - 
.toString(); + toString = String.format("Vlan %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", + "vlanTag", "vlanGateway", "vlanNetmask", "ip6Gateway", "ip6Cidr", + "ipRange", "ip6Range", "networkId")); + } return toString; } diff --git a/engine/schema/src/main/java/com/cloud/domain/DomainVO.java b/engine/schema/src/main/java/com/cloud/domain/DomainVO.java index 7f838a6c61e9..c950fa31c881 100644 --- a/engine/schema/src/main/java/com/cloud/domain/DomainVO.java +++ b/engine/schema/src/main/java/com/cloud/domain/DomainVO.java @@ -209,7 +209,7 @@ public void setState(Domain.State state) { public String toString() { return String.format("Domain %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "name", "uuid", "path")); + this, "id", "uuid", "name", "path")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/host/HostVO.java b/engine/schema/src/main/java/com/cloud/host/HostVO.java index b5b634a73a70..a449eb450cf9 100644 --- a/engine/schema/src/main/java/com/cloud/host/HostVO.java +++ b/engine/schema/src/main/java/com/cloud/host/HostVO.java @@ -712,7 +712,7 @@ public boolean equals(Object obj) { @Override public String toString() { - return String.format("Host %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "name", "uuid", "type")); + return String.format("Host %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "name", "type")); } public void setHypervisorType(HypervisorType hypervisorType) { diff --git a/engine/schema/src/main/java/com/cloud/network/as/AutoScalePolicyVO.java b/engine/schema/src/main/java/com/cloud/network/as/AutoScalePolicyVO.java index 36411d720b01..24d8b8e7f40a 100644 --- a/engine/schema/src/main/java/com/cloud/network/as/AutoScalePolicyVO.java +++ b/engine/schema/src/main/java/com/cloud/network/as/AutoScalePolicyVO.java @@ -95,7 +95,7 @@ public AutoScalePolicyVO(String name, long domainId, long accountId, int duratio 
public String toString() { return String.format("AutoScalePolicy %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "name", "uuid")); + this, "id", "uuid", "name")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmGroupVO.java b/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmGroupVO.java index 8c408e24f652..307de9f1a60d 100644 --- a/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmGroupVO.java +++ b/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmGroupVO.java @@ -32,6 +32,7 @@ import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.lang3.StringUtils; import com.cloud.utils.db.GenericDao; @@ -126,12 +127,9 @@ public AutoScaleVmGroupVO(long lbRuleId, long zoneId, long domainId, @Override public String toString() { - return new StringBuilder("AutoScaleVmGroupVO[").append("id=").append(id) - .append("|uuid=").append(uuid) - .append("|name=").append(name) - .append("|loadBalancerId=").append(loadBalancerId) - .append("|profileId=").append(profileId) - .append("]").toString(); + return String.format("AutoScaleVmGroup %s.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "loadBalancerId", "profileId")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmProfileVO.java b/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmProfileVO.java index 3d869a897dd0..562d908507e8 100644 --- a/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmProfileVO.java +++ b/engine/schema/src/main/java/com/cloud/network/as/AutoScaleVmProfileVO.java @@ -127,9 +127,9 @@ public AutoScaleVmProfileVO(long zoneId, long domainId, long accountId, long ser @Override public String toString() { - return String.format("AutoScaleVMProfileVO %s.", + return 
String.format("AutoScaleVMProfile %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "templateId", "uuid")); + this, "id", "uuid", "templateId")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/as/CounterVO.java b/engine/schema/src/main/java/com/cloud/network/as/CounterVO.java index c13076baa46b..be21515bb51a 100644 --- a/engine/schema/src/main/java/com/cloud/network/as/CounterVO.java +++ b/engine/schema/src/main/java/com/cloud/network/as/CounterVO.java @@ -82,7 +82,7 @@ public CounterVO(Source source, String name, String value, Network.Provider prov public String toString() { return String.format("Counter %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "name", "uuid")); + this, "id", "uuid", "name")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkServiceProviderVO.java b/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkServiceProviderVO.java index 80d40b13f32a..9557c7465bff 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkServiceProviderVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkServiceProviderVO.java @@ -114,7 +114,7 @@ public PhysicalNetworkServiceProviderVO(long physicalNetworkId, String name) { @Override public String toString() { return String.format("PhysicalNetworkServiceProvider %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "name", "uuid", "providerName")); + this, "id", "uuid", "name", "providerName")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkVO.java b/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkVO.java index 93850a322dd5..68e023984a0c 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkVO.java @@ -252,8 +252,8 @@ public String getName() { @Override 
public String toString() { - return String.format("PhysicalNetwork %s.", + return String.format("PhysicalNetwork %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "name", "uuid")); + this, "id", "uuid", "name")); } } diff --git a/engine/schema/src/main/java/com/cloud/network/rules/FirewallRuleVO.java b/engine/schema/src/main/java/com/cloud/network/rules/FirewallRuleVO.java index 2a4c19fb11dc..1dfdc5093a59 100644 --- a/engine/schema/src/main/java/com/cloud/network/rules/FirewallRuleVO.java +++ b/engine/schema/src/main/java/com/cloud/network/rules/FirewallRuleVO.java @@ -261,7 +261,7 @@ public FirewallRuleVO(String xId, Long ipAddressId, Integer portStart, Integer p public String toString() { return String.format("FirewallRule %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "uuid", "purpose", "state")); + this, "id", "uuid", "networkId", "purpose", "state")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLItemVO.java b/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLItemVO.java index c2a52663f216..4333d35d4733 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLItemVO.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/NetworkACLItemVO.java @@ -171,7 +171,7 @@ public Date getCreated() { public String toString() { return String.format("NetworkACLItem %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "uuid", "state")); + this, "id", "uuid", "aclId", "state")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/VpcOfferingVO.java b/engine/schema/src/main/java/com/cloud/network/vpc/VpcOfferingVO.java index d4f0783451c4..274b9fedecce 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/VpcOfferingVO.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/VpcOfferingVO.java @@ -183,7 +183,7 @@ public void setUniqueName(String uniqueName) { public String toString() { return 
String.format("VPCOffering %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "name", "uuid")); + this, "id", "uuid", "name")); } public void setName(String name) { diff --git a/engine/schema/src/main/java/com/cloud/offerings/NetworkOfferingVO.java b/engine/schema/src/main/java/com/cloud/offerings/NetworkOfferingVO.java index 73cd3d184dea..5cad366945f3 100644 --- a/engine/schema/src/main/java/com/cloud/offerings/NetworkOfferingVO.java +++ b/engine/schema/src/main/java/com/cloud/offerings/NetworkOfferingVO.java @@ -473,7 +473,7 @@ public NetworkOfferingVO(String name, Network.GuestType guestType, boolean speci @Override public String toString() { return String.format("NetworkOffering %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "name", "uuid", "trafficType")); + this, "id", "uuid", "name", "trafficType")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/projects/ProjectInvitationVO.java b/engine/schema/src/main/java/com/cloud/projects/ProjectInvitationVO.java index 2c127986f9f4..887939311b24 100644 --- a/engine/schema/src/main/java/com/cloud/projects/ProjectInvitationVO.java +++ b/engine/schema/src/main/java/com/cloud/projects/ProjectInvitationVO.java @@ -130,7 +130,7 @@ public void setState(State state) { public String toString() { return String.format("ProjectInvitation %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "projectId", "uuid", "forAccountId")); + this, "id", "uuid", "projectId", "forAccountId")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/projects/ProjectVO.java b/engine/schema/src/main/java/com/cloud/projects/ProjectVO.java index d4d35e677052..4ac34eeab4c2 100644 --- a/engine/schema/src/main/java/com/cloud/projects/ProjectVO.java +++ b/engine/schema/src/main/java/com/cloud/projects/ProjectVO.java @@ -119,7 +119,7 @@ public Date getRemoved() { public String toString() { return String.format("Project %s.", 
ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "name", "uuid", "domainId")); + this, "id", "uuid", "name", "domainId")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java b/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java index 5da09f569f25..79f5bcb51578 100644 --- a/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java @@ -594,6 +594,6 @@ public void setDiskSizeStrictness(boolean diskSizeStrictness) { public String toString() { return String.format("DiskOffering %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "name", "uuid")); + this, "id", "uuid", "name")); } } diff --git a/engine/schema/src/main/java/com/cloud/storage/SnapshotVO.java b/engine/schema/src/main/java/com/cloud/storage/SnapshotVO.java index 853dacac4701..19c67a91e2c8 100644 --- a/engine/schema/src/main/java/com/cloud/storage/SnapshotVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/SnapshotVO.java @@ -30,12 +30,11 @@ import javax.persistence.Table; import org.apache.cloudstack.util.HypervisorTypeConverter; -import org.apache.commons.lang3.builder.ToStringBuilder; -import org.apache.commons.lang3.builder.ToStringStyle; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.utils.db.GenericDao; import com.google.gson.annotations.Expose; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "snapshots") @@ -283,7 +282,8 @@ public Class getEntityType() { @Override public String toString() { - return String.format("Snapshot %s", new ToStringBuilder(this, ToStringStyle.JSON_STYLE).append("id", getId()).append("uuid", getUuid()).append("name", getName()) - .append("volumeId", getVolumeId()).toString()); + return String.format("Snapshot %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", 
"name", "volumeId", "version")); } } diff --git a/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java b/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java index df7e7b7db2b1..653be54a9109 100644 --- a/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java @@ -516,8 +516,7 @@ public void setUpdated(Date updated) { public String toString() { return String.format("Volume %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "name", - "uuid", "volumeType", "instanceId")); + this, "id", "uuid", "name", "volumeType", "instanceId")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/NicSecondaryIpVO.java b/engine/schema/src/main/java/com/cloud/vm/dao/NicSecondaryIpVO.java index 63eea3e20ddb..4c8208b4be84 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/NicSecondaryIpVO.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/NicSecondaryIpVO.java @@ -92,7 +92,7 @@ protected NicSecondaryIpVO() { public String toString() { return String.format("NicSecondaryIp %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "name", "uuid", "vmId", + this, "id", "uuid", "name", "vmId", "nicId", "ip4Address", "ip6Address", "networkId")); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/acl/RoleVO.java b/engine/schema/src/main/java/org/apache/cloudstack/acl/RoleVO.java index 7f534a226d58..cff139a9263a 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/acl/RoleVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/acl/RoleVO.java @@ -126,7 +126,8 @@ public boolean isDefault() { @Override public String toString() { - return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "name", "uuid", "roleType"); + return String.format("Role %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "roleType")); } public boolean isPublicRole() { diff 
--git a/engine/schema/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/db/VMEntityVO.java b/engine/schema/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/db/VMEntityVO.java index a1627ee9cd43..917f8bb800a2 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/db/VMEntityVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/db/VMEntityVO.java @@ -461,7 +461,7 @@ public void setDetails(Map details) { @Override public String toString() { if (toString == null) { - toString = String.format("VM %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "instanceName", "uuid", "type")); + toString = String.format("VM %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "instanceName", "type")); } return toString; } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreVO.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreVO.java index 8be37df0d5b4..c13f5aac6d69 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreVO.java @@ -221,6 +221,6 @@ public void setUsedBytes(Long usedBytes) { public String toString() { return String.format("ImageStore %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "name", "uuid")); + this, "id", "uuid", "name")); } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java index eebddc166b56..92a444bd83f1 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java @@ -371,7 +371,7 @@ public int hashCode() { 
@Override public String toString() { - return String.format("StoragePool %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "name", "uuid", "poolType")); + return String.format("StoragePool %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "name", "poolType")); } @Override diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java index bf7b0d6c634b..a3b7d0c9ecc1 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java @@ -26,6 +26,7 @@ import com.cloud.cpu.CPU; import com.cloud.storage.StorageManager; import com.cloud.user.UserData; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -600,9 +601,8 @@ public boolean isFollowRedirects() { @Override public String toString() { - return "TemplateObject{" + - "templateVO=" + getImage() + - ", dataStore=" + getDataStore() + - '}'; + return String.format("TemplateObject %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "imageVO", "dataStore")); } } diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java index 7b1088d589fc..f5cfaf072743 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java @@ -136,7 +136,7 @@ public SnapshotInfo backupSnapshot(SnapshotInfo snapshot) { try { 
snapObj.processEvent(Snapshot.Event.OperationNotPerformed); } catch (NoTransitionException e) { - logger.debug("Failed to change state: {}: {}", snapshot, e.toString()); + logger.debug("Failed to change state of the snapshot {}", snapshot, e); throw new CloudRuntimeException(e.toString()); } return snapshotDataFactory.getSnapshot(snapObj.getId(), store); diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java index 8a2e1565fe22..a3964bd461ec 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java @@ -41,6 +41,7 @@ import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.to.SnapshotObjectTO; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -467,9 +468,8 @@ public Class getEntityType() { @Override public String toString() { - return "SnapshotObject{" + - "snapshotVO=" + getSnapshotVO() + - ", dataStore=" + getDataStore() + - '}'; + return String.format("SnapshotObject %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "snapshot", "store")); } } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java index 7d8b83377853..6a10c26cc0bc 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java +++ 
b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java @@ -46,7 +46,6 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.volume.VolumeObject; -import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -469,7 +468,6 @@ public StoragePoolType getParentPoolType() { @Override public String toString() { - return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(pdsv, - "id", "name", "uuid"); + return pdsv.toString(); } } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java index b8f30aff8cfd..4a9f34c9f565 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java @@ -947,9 +947,8 @@ public boolean isFollowRedirects() { @Override public String toString() { - return "VolumeObject{" + - "volumeVO=" + getVolume() + - ", dataStore=" + getDataStore() + - '}'; + return String.format("VolumeObject %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "volumeVO", "dataStore")); } } diff --git a/framework/cluster/src/main/java/com/cloud/cluster/ManagementServerHostVO.java b/framework/cluster/src/main/java/com/cloud/cluster/ManagementServerHostVO.java index c589f4896b17..6c3b2a93994c 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/ManagementServerHostVO.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/ManagementServerHostVO.java @@ -202,7 +202,7 @@ public void setAlertCount(int count) { public String toString() { return String.format("ManagementServer %s", 
ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "uuid", "msid", "name")); + this, "id", "uuid", "name", "msid")); } @Override diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobVO.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobVO.java index 36c0fb72c36c..0f2c8d1736a4 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobVO.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobVO.java @@ -40,6 +40,7 @@ import com.cloud.utils.UuidUtils; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "async_job") @@ -384,27 +385,10 @@ public void setRemoved(final Date removed) { @Override public String toString() { - StringBuffer sb = new StringBuffer(); - sb.append("AsyncJobVO: {id:").append(getId()); - sb.append(", uuid: ").append(getUuid()); - sb.append(", userId: ").append(getUserId()); - sb.append(", accountId: ").append(getAccountId()); - sb.append(", instanceType: ").append(getInstanceType()); - sb.append(", instanceId: ").append(getInstanceId()); - sb.append(", cmd: ").append(getCmd()); - sb.append(", cmdInfo: ").append(getCmdInfo()); - sb.append(", cmdVersion: ").append(getCmdVersion()); - sb.append(", status: ").append(getStatus()); - sb.append(", processStatus: ").append(getProcessStatus()); - sb.append(", resultCode: ").append(getResultCode()); - sb.append(", result: ").append(getResult()); - sb.append(", initMsid: ").append(getInitMsid()); - sb.append(", completeMsid: ").append(getCompleteMsid()); - sb.append(", lastUpdated: ").append(getLastUpdated()); - sb.append(", lastPolled: ").append(getLastPolled()); - sb.append(", created: ").append(getCreated()); - sb.append(", removed: ").append(getRemoved()); - sb.append("}"); - return sb.toString(); + return String.format("AsyncJob %s", + 
ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", + "userId", "accountId", "instanceType", "instanceId", "cmd", "cmdInfo", + "cmdVersion", "status", "processStatus", "resultCode", "result", "initMsid", + "completeMsid", "lastUpdated", "lastPolled", "created", "removed")); } } diff --git a/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java b/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java index 705b35b40436..0ed658aa70d9 100644 --- a/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java +++ b/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java @@ -95,7 +95,7 @@ public void process(VirtualMachineProfile vmProfile, DeploymentPlan plan, Exclud for (AffinityGroupVMMapVO vmGroupMapping : vmGroupMappings) { if (vmGroupMapping != null) { if (logger.isDebugEnabled()) { - logger.debug("Processing affinity group " + vmGroupMapping.getAffinityGroupId() + "of type 'ExplicitDedication' for VM: " + vm); + logger.debug("Processing affinity group {} of type 'ExplicitDedication' for VM: {}", _affinityGroupDao.findById(vmGroupMapping.getAffinityGroupId()), vm); } long affinityGroupId = vmGroupMapping.getAffinityGroupId(); diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java index 1b2691c43c3e..83199b5f51c0 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java @@ -76,12 +76,12 @@ public DeployDestination plan(VirtualMachineProfile vmProfile, 
DeploymentPlan pl String haVmTag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.HaTag); if (vm.getLastHostId() != null && haVmTag == null) { - HostVO h = _hostDao.findById(vm.getLastHostId()); - DataCenter dc = _dcDao.findById(h.getDataCenterId()); - Pod pod = _podDao.findById(h.getPodId()); - Cluster c = _clusterDao.findById(h.getClusterId()); - logger.debug(String.format("Start baremetal vm %s on last stayed host %s", vm, h)); - return new DeployDestination(dc, pod, c, h); + HostVO host = _hostDao.findById(vm.getLastHostId()); + DataCenter dc = _dcDao.findById(host.getDataCenterId()); + Pod pod = _podDao.findById(host.getPodId()); + Cluster cluster = _clusterDao.findById(host.getClusterId()); + logger.debug("Start baremetal vm {} on last stayed host {}", vm, host); + return new DeployDestination(dc, pod, cluster, host); } if (haVmTag != null) { @@ -128,18 +128,18 @@ public DeployDestination plan(VirtualMachineProfile vmProfile, DeploymentPlan pl cluster.getDataCenterId()); return null; } - for (HostVO h : hosts) { - long cluster_id = h.getClusterId(); + for (HostVO host : hosts) { + long cluster_id = host.getClusterId(); ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio"); ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio"); Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); - if (_capacityMgr.checkIfHostHasCapacity(h, cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { - logger.debug(String.format("Find host %s has enough capacity", h)); - DataCenter dc = _dcDao.findById(h.getDataCenterId()); - Pod pod = _podDao.findById(h.getPodId()); - return new DeployDestination(dc, pod, cluster, h); + if (_capacityMgr.checkIfHostHasCapacity(host, cpu_requested, ram_requested, false, cpuOvercommitRatio, 
memoryOvercommitRatio, true)) { + logger.debug("Found host {} with enough capacity", host); + DataCenter dc = _dcDao.findById(host.getDataCenterId()); + Pod pod = _podDao.findById(host.getPodId()); + return new DeployDestination(dc, pod, cluster, host); } } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java index 52adc59cbe7b..560020cad388 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java @@ -20,14 +20,13 @@ import java.util.List; import java.util.Map; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.joda.time.Duration; import org.libvirt.StoragePool; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; -import org.apache.commons.lang3.builder.ToStringBuilder; -import org.apache.commons.lang3.builder.ToStringStyle; import com.cloud.agent.api.to.HostTO; import com.cloud.agent.properties.AgentProperties; @@ -328,7 +327,7 @@ public String createHeartBeatCommand(HAStoragePool primaryStoragePool, String ho @Override public String toString() { - return new ToStringBuilder(this, ToStringStyle.JSON_STYLE).append("uuid", getUuid()).append("path", getLocalPath()).toString(); + return String.format("LibvirtStoragePool %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "uuid", "path")); } @Override diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/BigSwitchBcfDeviceVO.java b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/BigSwitchBcfDeviceVO.java index d6c8555a0233..7fec1e2c093d 100644 --- 
a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/BigSwitchBcfDeviceVO.java +++ b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/BigSwitchBcfDeviceVO.java @@ -95,7 +95,7 @@ public BigSwitchBcfDeviceVO(final long hostId, final long physicalNetworkId, public String toString() { return String.format("BigSwitchBcfDevice %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "name", "uuid")); + this, "id", "uuid", "name")); } @Override diff --git a/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/element/BrocadeVcsElement.java b/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/element/BrocadeVcsElement.java index 9c06efc6e402..326ea4948604 100644 --- a/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/element/BrocadeVcsElement.java +++ b/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/element/BrocadeVcsElement.java @@ -276,9 +276,9 @@ public BrocadeVcsDeviceVO addBrocadeVcsDevice(AddBrocadeVcsDeviceCmd cmd) { final PhysicalNetworkServiceProviderVO ntwkSvcProvider = _physicalNetworkServiceProviderDao.findByServiceProvider(physicalNetwork.getId(), networkDevice.getNetworkServiceProvder()); if (ntwkSvcProvider == null) { - throw new CloudRuntimeException(String.format("Network Service Provider: %s is not enabled in the physical network: %sto add this device", networkDevice.getNetworkServiceProvder(), physicalNetwork)); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is not enabled in the physical network: %s to add this device", networkDevice.getNetworkServiceProvder(), physicalNetwork)); } else if (ntwkSvcProvider.getState() == PhysicalNetworkServiceProvider.State.Shutdown) { - throw new CloudRuntimeException(String.format("Network Service Provider: %s is in shutdown state in the physical network: %sto add this device", ntwkSvcProvider.getProviderName(), physicalNetwork)); + throw new 
CloudRuntimeException(String.format("Network Service Provider: %s is in shutdown state in the physical network: %s to add this device", ntwkSvcProvider.getProviderName(), physicalNetwork)); } Map params = new HashMap(); diff --git a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/cisco/CiscoVnmcControllerVO.java b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/cisco/CiscoVnmcControllerVO.java index ea2cf19715b2..f03ea9a56568 100644 --- a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/cisco/CiscoVnmcControllerVO.java +++ b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/cisco/CiscoVnmcControllerVO.java @@ -66,9 +66,9 @@ public CiscoVnmcControllerVO(long hostId, long physicalNetworkId, String provide @Override public String toString() { - return String.format("CiscoVnmcController %s.", + return String.format("CiscoVnmcController %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "name", "uuid")); + this, "id", "uuid", "name")); } @Override diff --git a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/element/ElasticLoadBalancerElement.java b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/element/ElasticLoadBalancerElement.java index 12269bf7cf7f..06ff497dfd25 100644 --- a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/element/ElasticLoadBalancerElement.java +++ b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/element/ElasticLoadBalancerElement.java @@ -72,7 +72,7 @@ public class ElasticLoadBalancerElement extends AdapterBase implements LoadBalan private boolean canHandle(Network network, List rules) { if (network.getGuestType() != Network.GuestType.Shared || network.getTrafficType() != TrafficType.Guest) { - logger.debug(String.format("Not handling network %s with type %s and traffic type %s", network, network.getGuestType(), 
network.getTrafficType())); + logger.debug("Not handling network {} with type {} and traffic type {}", network, network.getGuestType(), network.getTrafficType()); return false; } diff --git a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java index 6c7b88afd896..c02d8cf67aa8 100644 --- a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java +++ b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java @@ -247,7 +247,7 @@ public boolean applyLoadBalancerRules(Network network, List r DomainRouterVO elbVm = findElbVmForLb(rules.get(0)); if (elbVm == null) { - logger.warn(String.format("Unable to apply lb rules, ELB vm doesn't exist in the network %s", network)); + logger.warn("Unable to apply lb rules, ELB vm doesn't exist in the network {}", network); throw new ResourceUnavailableException("Unable to apply lb rules", DataCenter.class, network.getDataCenterId()); } diff --git a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/LoadBalanceRuleHandler.java b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/LoadBalanceRuleHandler.java index 45afa58a1a07..4e331891485b 100644 --- a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/LoadBalanceRuleHandler.java +++ b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/LoadBalanceRuleHandler.java @@ -233,7 +233,7 @@ private DomainRouterVO deployELBVm(Network guestNetwork, final DeployDestination guestNetwork = _networkDao.acquireInLockTable(guestNetworkId); if (guestNetwork == null) { - throw new ConcurrentOperationException(String.format("Unable to acquire network lock: %s", guestNetwork)); + throw new 
ConcurrentOperationException(String.format("Unable to acquire lock for the network: %s", guestNetwork)); } try { diff --git a/plugins/network-elements/netscaler/src/main/java/com/cloud/network/element/NetscalerElement.java b/plugins/network-elements/netscaler/src/main/java/com/cloud/network/element/NetscalerElement.java index b693e06ad8f0..096b400ee938 100644 --- a/plugins/network-elements/netscaler/src/main/java/com/cloud/network/element/NetscalerElement.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/network/element/NetscalerElement.java @@ -334,7 +334,7 @@ public boolean manageGuestNetworkWithNetscalerControlCenter(boolean add, Network } } netscalerControlCenter = _hostDao.findById(lbDeviceVO.getId()); - logger.debug(String.format("Allocated Netscaler Control Center device:%s for the network: %s", lbDeviceVO, guestConfig)); + logger.debug("Allocated Netscaler Control Center device: {} for the network: {}", lbDeviceVO, guestConfig); } else { // find the load balancer device allocated for the network diff --git a/plugins/network-elements/netscaler/src/main/java/com/cloud/network/vm/NetScalerVMManagerImpl.java b/plugins/network-elements/netscaler/src/main/java/com/cloud/network/vm/NetScalerVMManagerImpl.java index 299341161491..c3d4cf4b24ea 100644 --- a/plugins/network-elements/netscaler/src/main/java/com/cloud/network/vm/NetScalerVMManagerImpl.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/network/vm/NetScalerVMManagerImpl.java @@ -269,7 +269,7 @@ public Map deployNsVpx(Account owner, DeployDestination dest, De ServiceOfferingVO vpxOffering = _serviceOfferingDao.findById(svcOffId); //using 2GB and 2CPU offering if(vpxOffering.getRamSize() < 2048 && vpxOffering.getCpu() <2 ) { - throw new InvalidParameterValueException(String.format("Specified Service Offering :%s NS Vpx cannot be deployed. 
Min 2GB Ram and 2 CPU are required", vpxOffering)); + throw new InvalidParameterValueException(String.format("Specified Service Offering: %s NS Vpx cannot be deployed. Min 2GB Ram and 2 CPU are required", vpxOffering)); } long userId = CallContext.current().getCallingUserId(); diff --git a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java index 63aee8c2dc3f..3ad08428e9dc 100644 --- a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ElastistorPrimaryDataStoreLifeCycle.java @@ -365,8 +365,8 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { if (!dataStoreVO.isManaged()) { boolean success = false; - for (HostVO h : allHosts) { - success = createStoragePool(h, primarystore); + for (HostVO host : allHosts) { + success = createStoragePool(host, primarystore); if (success) { break; } diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java index 88bb487861df..052191128f1c 100644 --- a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java @@ -294,6 +294,6 @@ private void sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StorageP assert (answer instanceof ModifyStoragePoolAnswer) : String.format("ModifyStoragePoolAnswer expected ; Pool = 
%s Host = %s", storagePool, host); - logger.info("Connection established between storage pool {} and host + {}", storagePool, host); + logger.info("Connection established between storage pool {} and host {}", storagePool, host); } } diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolHostListener.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolHostListener.java index 3111b04e0c71..7e0986bc63b5 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolHostListener.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolHostListener.java @@ -129,7 +129,7 @@ public boolean hostConnect(long hostId, long poolId) throws StorageConflictExcep StorPoolUtil.spLog("Storage pool [%s] is not connected to the host [%s]", poolVO, host); deleteVolumeWhenHostCannotConnectPool(conn, volumeOnPool); removePoolOnHost(poolHost, isPoolConnectedToTheHost); - throw new CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command%s", pool)); + throw new CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command for pool %s", pool)); } if (!answer.getResult()) { diff --git a/server/src/main/java/com/cloud/dc/DedicatedResourceVO.java b/server/src/main/java/com/cloud/dc/DedicatedResourceVO.java index bdd66fd5b233..3324bf620410 100644 --- a/server/src/main/java/com/cloud/dc/DedicatedResourceVO.java +++ b/server/src/main/java/com/cloud/dc/DedicatedResourceVO.java @@ -174,7 +174,7 @@ public int hashCode() { @Override public String toString() { - return String.format("DedicatedResourceVO %s", + return String.format("DedicatedResource %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( this, "id", "uuid")); } diff --git a/server/src/main/java/com/cloud/hypervisor/KVMGuru.java 
b/server/src/main/java/com/cloud/hypervisor/KVMGuru.java index 98cd6c8c3fa5..9edaa5e6d646 100644 --- a/server/src/main/java/com/cloud/hypervisor/KVMGuru.java +++ b/server/src/main/java/com/cloud/hypervisor/KVMGuru.java @@ -149,7 +149,7 @@ protected void setVmQuotaPercentage(VirtualMachineTO to, VirtualMachineProfile v "Setting CPU quota percentage as: {}", host, hostMaxSpeed, vm, maxSpeed, percent.doubleValue()); } catch (NumberFormatException e) { - logger.error("Error calculating VM: {} quota percentage, it wll not be set. Error: {}", vm, e.getMessage(), e); + logger.error("Error calculating VM: {} quota percentage, it will not be set. Error: {}", vm, e.getMessage(), e); } } } diff --git a/server/src/main/java/com/cloud/network/NetworkModelImpl.java b/server/src/main/java/com/cloud/network/NetworkModelImpl.java index 47225a635dcf..5f6cae31699c 100644 --- a/server/src/main/java/com/cloud/network/NetworkModelImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkModelImpl.java @@ -1328,7 +1328,7 @@ public String getDefaultManagementTrafficLabel(long zoneId, HypervisorType hyper } } catch (Exception ex) { if (logger.isDebugEnabled()) { - logger.debug("Failed to retrive the default label for management " + + logger.debug("Failed to retrieve the default label for management " + "traffic: zone: {} hypervisor: {} due to: {}", () -> _dcDao.findById(zoneId), hypervisorType::toString, ex::getMessage); } @@ -1358,7 +1358,7 @@ public String getDefaultStorageTrafficLabel(long zoneId, HypervisorType hypervis } } catch (Exception ex) { if (logger.isDebugEnabled()) { - logger.debug("Failed to retrive the default label for storage " + + logger.debug("Failed to retrieve the default label for storage " + "traffic: zone: {} hypervisor: {} due to: {}", () -> _dcDao.findById(zoneId), hypervisorType::toString, ex::getMessage); } diff --git a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java 
index cff889b80f49..3d3a28d14048 100644 --- a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java +++ b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java @@ -671,7 +671,7 @@ public AutoScaleVmProfile updateAutoScaleVmProfile(UpdateAutoScaleVmProfileCmd c } vmProfile = checkValidityAndPersist(vmProfile, false); - logger.info("Updated Auto Scale Vm Profile:{}", vmProfile); + logger.info("Updated Auto Scale Vm Profile: {}", vmProfile); return vmProfile; } diff --git a/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java b/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java index e8a523ec3ac4..015cbe490494 100644 --- a/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java +++ b/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java @@ -603,7 +603,7 @@ public StickinessPolicy createLBStickinessPolicy(CreateLBStickinessPolicyCmd cmd _accountMgr.checkAccess(caller.getCallingAccount(), null, true, loadBalancer); if (loadBalancer.getState() == FirewallRule.State.Revoke) { - throw new InvalidParameterValueException(String.format("Failed: LB rule: %s is in deleting state: ", loadBalancer)); + throw new InvalidParameterValueException(String.format("Failed: LB rule: %s is in deleting state", loadBalancer)); } /* Generic validations */ diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index 9c19009a50e8..76d2943e18c8 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -5047,19 +5047,19 @@ private boolean updateHostsInCluster(final UpdateHostPasswordCmd command) { Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(final TransactionStatus status) { - for (final HostVO h : hosts) { + for (final HostVO host : 
hosts) { if (logger.isDebugEnabled()) { - logger.debug("Changing password for host {}", h); + logger.debug("Changing password for host {}", host); } // update password for this host - final DetailVO nv = _detailsDao.findDetail(h.getId(), ApiConstants.USERNAME); + final DetailVO nv = _detailsDao.findDetail(host.getId(), ApiConstants.USERNAME); if (nv == null) { - final DetailVO nvu = new DetailVO(h.getId(), ApiConstants.USERNAME, userNameWithoutSpaces); + final DetailVO nvu = new DetailVO(host.getId(), ApiConstants.USERNAME, userNameWithoutSpaces); _detailsDao.persist(nvu); - final DetailVO nvp = new DetailVO(h.getId(), ApiConstants.PASSWORD, DBEncryptionUtil.encrypt(command.getPassword())); + final DetailVO nvp = new DetailVO(host.getId(), ApiConstants.PASSWORD, DBEncryptionUtil.encrypt(command.getPassword())); _detailsDao.persist(nvp); } else if (nv.getValue().equals(userNameWithoutSpaces)) { - final DetailVO nvp = _detailsDao.findDetail(h.getId(), ApiConstants.PASSWORD); + final DetailVO nvp = _detailsDao.findDetail(host.getId(), ApiConstants.PASSWORD); nvp.setValue(DBEncryptionUtil.encrypt(command.getPassword())); _detailsDao.persist(nvp); } else { diff --git a/server/src/main/java/com/cloud/server/StatsCollector.java b/server/src/main/java/com/cloud/server/StatsCollector.java index 092e799eb2ca..be5ef08cceba 100644 --- a/server/src/main/java/com/cloud/server/StatsCollector.java +++ b/server/src/main/java/com/cloud/server/StatsCollector.java @@ -1471,7 +1471,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } if (vmDiskStat_lock == null) { - logger.warn("unable to find vm disk stats from host for account: {} with vm: {} and volume:{}", vm.getAccountId(), vm, volume); + logger.warn("unable to find vm disk stats from host for account: {} with vm: {} and volume: {}", vm.getAccountId(), vm, volume); continue; } diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java 
b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index c1708262a9d9..2f19dffb7b3d 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -1773,7 +1773,7 @@ public Volume destroyVolume(long volumeId, Account caller, boolean expunge, bool stateTransitTo(volume, Volume.Event.DestroyRequested); stateTransitTo(volume, Volume.Event.OperationSucceeded); } catch (NoTransitionException e) { - logger.debug("Failed to destroy volume{}", volume, e); + logger.debug("Failed to destroy volume {}", volume, e); return null; } _resourceLimitMgr.decrementVolumeResourceCount(volume.getAccountId(), volume.isDisplay(), diff --git a/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java b/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java index c06cf5a03830..209f69dcddc0 100644 --- a/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java +++ b/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java @@ -167,10 +167,9 @@ protected void checkTagsDeletePermission(List tagsToDelete, Account for (ResourceTag resourceTag : tagsToDelete) { Account owner = _accountMgr.getAccount(resourceTag.getAccountId()); if(logger.isDebugEnabled()) { - logger.debug("Resource Tag Id: {}", resourceTag.getResourceId()); - logger.debug("Resource Tag Uuid: {}", resourceTag.getResourceUuid()); - logger.debug("Resource Tag Type: {}", resourceTag.getResourceType()); - logger.debug("Resource Tag Account: {}", owner); + logger.debug("Resource Tag Id: {}, Resource Tag Uuid: {}, Resource Tag Type: {}, " + + "Resource Tag Account: {}", resourceTag.getResourceId(), + resourceTag.getResourceUuid(), resourceTag.getResourceType(), owner); } if (caller.getAccountId() != resourceTag.getAccountId()) { if(logger.isDebugEnabled()) { diff --git a/server/src/main/java/com/cloud/template/TemplateAdapterBase.java 
b/server/src/main/java/com/cloud/template/TemplateAdapterBase.java index d1a491cde665..b5be09376fc5 100644 --- a/server/src/main/java/com/cloud/template/TemplateAdapterBase.java +++ b/server/src/main/java/com/cloud/template/TemplateAdapterBase.java @@ -236,7 +236,7 @@ public TemplateProfile prepare(boolean isIso, long userId, String name, String d } Account caller = CallContext.current().getCallingAccount(); if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getId())) { - throw new PermissionDeniedException(String.format("Cannot perform this operation, Zone is currently disabled: %s", zone)); + throw new PermissionDeniedException(String.format("Cannot perform this operation, Zone %s is currently disabled", zone)); } } } diff --git a/server/src/main/java/org/apache/cloudstack/diagnostics/to/DiagnosticsDataObject.java b/server/src/main/java/org/apache/cloudstack/diagnostics/to/DiagnosticsDataObject.java index dd91891f8343..34260f606253 100644 --- a/server/src/main/java/org/apache/cloudstack/diagnostics/to/DiagnosticsDataObject.java +++ b/server/src/main/java/org/apache/cloudstack/diagnostics/to/DiagnosticsDataObject.java @@ -24,6 +24,7 @@ import com.cloud.agent.api.Answer; import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataTO; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class DiagnosticsDataObject implements DataObject { private DataTO dataTO; @@ -36,10 +37,9 @@ public DiagnosticsDataObject(DataTO dataTO, DataStore dataStore) { @Override public String toString() { - return "DiagnosticsDataObject{" + - "dataTO=" + dataTO + - ", dataStore=" + getDataStore() + - '}'; + return String.format("DiagnosticsDataObject %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "dataTO", "dataStore")); } @Override diff --git a/server/src/main/java/org/apache/cloudstack/ha/provider/host/HAAbstractHostProvider.java 
b/server/src/main/java/org/apache/cloudstack/ha/provider/host/HAAbstractHostProvider.java index 81a85b03cf09..5907c1864ad7 100644 --- a/server/src/main/java/org/apache/cloudstack/ha/provider/host/HAAbstractHostProvider.java +++ b/server/src/main/java/org/apache/cloudstack/ha/provider/host/HAAbstractHostProvider.java @@ -69,12 +69,12 @@ public boolean isInMaintenanceMode(final Host host) { } @Override - public void fenceSubResources(final Host r) { - if (r.getState() != Status.Down) { + public void fenceSubResources(final Host host) { + if (host.getState() != Status.Down) { try { - logger.debug("Trying to disconnect the host without investigation and scheduling HA for the VMs on host {}", r); - agentManager.disconnectWithoutInvestigation(r.getId(), Event.HostDown); - oldHighAvailabilityManager.scheduleRestartForVmsOnHost((HostVO)r, true); + logger.debug("Trying to disconnect the host without investigation and scheduling HA for the VMs on host {}", host); + agentManager.disconnectWithoutInvestigation(host.getId(), Event.HostDown); + oldHighAvailabilityManager.scheduleRestartForVmsOnHost((HostVO)host, true); } catch (Exception e) { logger.error("Failed to disconnect host and schedule HA restart of VMs after fencing the host: ", e); } diff --git a/server/src/main/java/org/apache/cloudstack/outofbandmanagement/PowerOperationTask.java b/server/src/main/java/org/apache/cloudstack/outofbandmanagement/PowerOperationTask.java index 7f7e833e6570..577164d4c0d6 100644 --- a/server/src/main/java/org/apache/cloudstack/outofbandmanagement/PowerOperationTask.java +++ b/server/src/main/java/org/apache/cloudstack/outofbandmanagement/PowerOperationTask.java @@ -42,7 +42,7 @@ public PowerOperationTask(OutOfBandManagementService service, Host host, OutOfBa @Override public String toString() { - return String.format("[OOBM Task] Power operation:%s on Host:%d(%s)", powerOperation, host.getId(), host.getName()); + return String.format("[OOBM Task] Power operation: %s on Host: %s", 
powerOperation, host); } @Override From b47db16b1102cf5482c2b9fb0d8430811d6c68b5 Mon Sep 17 00:00:00 2001 From: Vishesh Date: Fri, 27 Dec 2024 17:39:42 +0530 Subject: [PATCH 19/22] fixups --- .../main/java/com/cloud/dc/VmwareDatacenterVO.java | 5 ++++- .../src/main/java/com/cloud/storage/BucketVO.java | 8 ++++---- .../apache/cloudstack/secstorage/HeuristicVO.java | 4 +++- .../storage/datastore/db/ObjectStoreVO.java | 2 +- .../cloudstack/storage/sharedfs/SharedFSVO.java | 2 +- .../cloudstack/vm/schedule/VMScheduledJobVO.java | 2 +- .../storage/object/store/ObjectStoreImpl.java | 8 ++++---- .../apache/cloudstack/framework/events/Event.java | 12 ++++-------- .../mom/webhook/vo/WebhookDeliveryJoinVO.java | 4 ++-- .../cloudstack/mom/webhook/vo/WebhookDeliveryVO.java | 2 +- .../cloudstack/mom/webhook/vo/WebhookJoinVO.java | 2 +- .../apache/cloudstack/mom/webhook/vo/WebhookVO.java | 2 +- .../kvm/storage/MultipathSCSIAdapterBase.java | 5 ++++- .../GenericHeuristicPresetVariable.java | 4 +++- 14 files changed, 34 insertions(+), 28 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/dc/VmwareDatacenterVO.java b/engine/schema/src/main/java/com/cloud/dc/VmwareDatacenterVO.java index 6390d923ed83..5a4a71f82e7f 100644 --- a/engine/schema/src/main/java/com/cloud/dc/VmwareDatacenterVO.java +++ b/engine/schema/src/main/java/com/cloud/dc/VmwareDatacenterVO.java @@ -28,6 +28,7 @@ import com.cloud.utils.NumbersUtil; import com.cloud.utils.db.Encrypt; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; /** * VmwareDatacenterVO contains information of Vmware Datacenter associated with a CloudStack zone. 
@@ -125,7 +126,9 @@ public void setPassword(String password) { @Override public String toString() { - return new StringBuilder("VmwareDatacenter[").append(guid).append("]").toString(); + return String.format("VmwareDatacenter %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "guid")); } @Override diff --git a/engine/schema/src/main/java/com/cloud/storage/BucketVO.java b/engine/schema/src/main/java/com/cloud/storage/BucketVO.java index 53017447c078..a54c1dd9b081 100644 --- a/engine/schema/src/main/java/com/cloud/storage/BucketVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/BucketVO.java @@ -19,8 +19,7 @@ import com.cloud.utils.db.GenericDao; import com.google.gson.annotations.Expose; import org.apache.cloudstack.storage.object.Bucket; -import org.apache.commons.lang3.builder.ToStringBuilder; -import org.apache.commons.lang3.builder.ToStringStyle; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import javax.persistence.Column; import javax.persistence.Entity; @@ -257,7 +256,8 @@ public Class getEntityType() { @Override public String toString() { - return String.format("Bucket %s", new ToStringBuilder(this, ToStringStyle.JSON_STYLE).append("uuid", getUuid()).append("name", getName()) - .append("ObjectStoreId", getObjectStoreId()).toString()); + return String.format("Bucket %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "objectStoreId")); } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/secstorage/HeuristicVO.java b/engine/schema/src/main/java/org/apache/cloudstack/secstorage/HeuristicVO.java index b0da0c5e7477..f647d0c83656 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/secstorage/HeuristicVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/secstorage/HeuristicVO.java @@ -120,6 +120,8 @@ public void setHeuristicRule(String heuristicRule) { @Override public String toString() { - 
return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "name", "heuristicRule", "type"); + return String.format("Heuristic %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "heuristicRule", "type")); } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ObjectStoreVO.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ObjectStoreVO.java index 7a186b604295..18cc06a65733 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ObjectStoreVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ObjectStoreVO.java @@ -146,6 +146,6 @@ public void setDetails(Map details) { public String toString() { return String.format("ObjectStore %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "name", "uuid", "providerName")); + this, "id", "uuid", "name", "providerName")); } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSVO.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSVO.java index b6d51e5cfa17..8870bf6d4d89 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSVO.java @@ -125,7 +125,7 @@ public SharedFSVO(String name, String description, long domainId, long accountId public String toString() { return String.format("SharedFS %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "name", "uuid")); + this, "id", "uuid", "name")); } @Override diff --git a/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduledJobVO.java b/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduledJobVO.java index b13d77849433..775e9cfe40cf 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduledJobVO.java +++ 
b/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduledJobVO.java @@ -78,7 +78,7 @@ public VMScheduledJobVO(long vmId, long vmScheduleId, VMSchedule.Action action, public String toString() { return String.format("VMScheduledJob %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "uuid", "action", "vmScheduleId", "vmId")); + this, "id", "uuid", "action", "vmScheduleId", "vmId", "asyncJobId")); } @Override diff --git a/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/store/ObjectStoreImpl.java b/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/store/ObjectStoreImpl.java index 93d3bd7c2f10..a96d87ada045 100644 --- a/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/store/ObjectStoreImpl.java +++ b/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/store/ObjectStoreImpl.java @@ -30,6 +30,7 @@ import org.apache.cloudstack.storage.datastore.db.ObjectStoreVO; import org.apache.cloudstack.storage.object.ObjectStoreDriver; import org.apache.cloudstack.storage.object.ObjectStoreEntity; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import java.util.Date; import java.util.List; @@ -59,10 +60,9 @@ public static ObjectStoreEntity getDataStore(ObjectStoreVO objectStoreVO, Object @Override public String toString() { - return "ObjectStoreImpl{" + - "objectStoreVO=" + objectStoreVO + - ", provider=" + provider.getName() + - '}'; + return String.format("ObjectStoreImpl %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "objectStoreVO", "provider")); } @Override diff --git a/framework/events/src/main/java/org/apache/cloudstack/framework/events/Event.java b/framework/events/src/main/java/org/apache/cloudstack/framework/events/Event.java index 9dfb5c699b94..c293de8b4ddd 100644 --- a/framework/events/src/main/java/org/apache/cloudstack/framework/events/Event.java +++ 
b/framework/events/src/main/java/org/apache/cloudstack/framework/events/Event.java @@ -21,6 +21,7 @@ import com.google.gson.Gson; import com.google.gson.annotations.Expose; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class Event { @@ -51,14 +52,9 @@ public Event(String eventSource, String eventCategory, String eventType, String @Override public String toString() { - return "Event{" + - "eventId=" + eventId + - ", eventUuid='" + eventUuid + '\'' + - ", eventType='" + eventType + '\'' + - ", resourceType='" + resourceType + '\'' + - ", resourceUUID='" + resourceUUID + '\'' + - ", description='" + description + '\'' + - '}'; + return String.format("Event %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "eventId", "eventUuid", "eventType", "resourceType", "resourceUUID", "description")); } public Long getEventId() { diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryJoinVO.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryJoinVO.java index e36f870c8d95..f0fb3e1cc9b1 100644 --- a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryJoinVO.java +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryJoinVO.java @@ -173,8 +173,8 @@ public Date getEndTime() { @Override public String toString() { - return String.format("WebhookDelivery [%s]", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( - this, "id", "uuid", "webhookId", "startTime", "success")); + return String.format("WebhookDelivery %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "webhookId", "webhookName", "startTime", "success")); } public WebhookDeliveryJoinVO() { diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryVO.java 
b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryVO.java index e39f57a26637..e266ea5d7c4b 100644 --- a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryVO.java +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryVO.java @@ -130,7 +130,7 @@ public Date getEndTime() { @Override public String toString() { - return String.format("WebhookDelivery [%s]", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + return String.format("WebhookDelivery %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( this, "id", "uuid", "webhookId", "startTime", "success")); } diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookJoinVO.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookJoinVO.java index f17086095871..9ff15d34a9cd 100644 --- a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookJoinVO.java +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookJoinVO.java @@ -225,7 +225,7 @@ public Class getEntityType() { @Override public String toString() { - return String.format("Webhook [%s]", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + return String.format("Webhook %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( this, "id", "uuid", "name")); } diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookVO.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookVO.java index 93e3e801423e..852cdf740d1a 100644 --- a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookVO.java +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookVO.java @@ -191,7 +191,7 @@ public Class getEntityType() { @Override public String toString() { - 
return String.format("Webhook [%s]",ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + return String.format("Webhook %s",ReflectionToStringBuilderUtils.reflectOnlySelectedFields( this, "id", "uuid", "name", "payloadUrl")); } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java index 19d8378eb786..0cf8ce0018d3 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java @@ -38,6 +38,7 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.OutputInterpreter; import com.cloud.utils.script.Script; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -604,7 +605,9 @@ public String getPath() { } public String toString() { - return String.format("type=%s; address=%s; connid=%s", getType(), getAddress(), getConnectionId()); + return String.format("AddressInfo %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "type", "address", "connectionId")); } } diff --git a/server/src/main/java/org/apache/cloudstack/storage/heuristics/presetvariables/GenericHeuristicPresetVariable.java b/server/src/main/java/org/apache/cloudstack/storage/heuristics/presetvariables/GenericHeuristicPresetVariable.java index f8ded3a864a7..28d4327954e7 100644 --- a/server/src/main/java/org/apache/cloudstack/storage/heuristics/presetvariables/GenericHeuristicPresetVariable.java +++ b/server/src/main/java/org/apache/cloudstack/storage/heuristics/presetvariables/GenericHeuristicPresetVariable.java @@ -38,6 +38,8 @@ public void setName(String name) { 
@Override public String toString() { - return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, fieldNamesToIncludeInToString.toArray(new String[0])); + return String.format("GenericHeuristicPresetVariable %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, fieldNamesToIncludeInToString.toArray(new String[0]))); } } From 1cb1962fbd2ca25584be7323ae8d08e14d83d951 Mon Sep 17 00:00:00 2001 From: Vishesh Date: Tue, 31 Dec 2024 12:56:49 +0530 Subject: [PATCH 20/22] monir fixups --- .../java/com/cloud/storage/OCFS2ManagerImpl.java | 14 +++++++------- .../com/cloud/tags/TaggedResourceManagerImpl.java | 6 +++--- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/server/src/main/java/com/cloud/storage/OCFS2ManagerImpl.java b/server/src/main/java/com/cloud/storage/OCFS2ManagerImpl.java index 0845af7b293b..0bc586c28de4 100644 --- a/server/src/main/java/com/cloud/storage/OCFS2ManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/OCFS2ManagerImpl.java @@ -88,12 +88,12 @@ public boolean stop() { private List> marshalNodes(List hosts) { Integer i = 0; List> lst = new ArrayList>(); - for (HostVO h : hosts) { + for (HostVO host : hosts) { /** * Don't show "node" in node name otherwise OVM's utils/config_o2cb.sh will be going crazy */ - String nodeName = "ovm_" + h.getPrivateIpAddress().replace(".", "_"); - Ternary node = new Ternary(i, h.getPrivateIpAddress(), nodeName); + String nodeName = "ovm_" + host.getPrivateIpAddress().replace(".", "_"); + Ternary node = new Ternary(i, host.getPrivateIpAddress(), nodeName); lst.add(node); i++; } @@ -102,14 +102,14 @@ private List> marshalNodes(List hosts) private boolean prepareNodes(String clusterName, List hosts) { PrepareOCFS2NodesCommand cmd = new PrepareOCFS2NodesCommand(clusterName, marshalNodes(hosts)); - for (HostVO h : hosts) { - Answer ans = _agentMgr.easySend(h.getId(), cmd); + for (HostVO host : hosts) { + Answer ans = _agentMgr.easySend(host.getId(), cmd); if (ans == null) { 
- logger.debug("Host {} is not in UP state, skip preparing OCFS2 node on it", h); + logger.debug("Host {} is not in UP state, skip preparing OCFS2 node on it", host); continue; } if (!ans.getResult()) { - logger.warn("PrepareOCFS2NodesCommand failed on host {} {}", h, ans.getDetails()); + logger.warn("PrepareOCFS2NodesCommand failed on host {} {}", host, ans.getDetails()); return false; } } diff --git a/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java b/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java index 209f69dcddc0..caae8f133a48 100644 --- a/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java +++ b/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java @@ -167,9 +167,9 @@ protected void checkTagsDeletePermission(List tagsToDelete, Account for (ResourceTag resourceTag : tagsToDelete) { Account owner = _accountMgr.getAccount(resourceTag.getAccountId()); if(logger.isDebugEnabled()) { - logger.debug("Resource Tag Id: {}, Resource Tag Uuid: {}, Resource Tag Type: {}, " + - "Resource Tag Account: {}", resourceTag.getResourceId(), - resourceTag.getResourceUuid(), resourceTag.getResourceType(), owner); + logger.debug("Resource Tag Id: {}, Uuid: {}, Type: {}, Account: {}", + resourceTag.getResourceId(), resourceTag.getResourceUuid(), + resourceTag.getResourceType(), owner); } if (caller.getAccountId() != resourceTag.getAccountId()) { if(logger.isDebugEnabled()) { From f0ee8840f1b4a4aaefb86a5c7f019c2d1b165f89 Mon Sep 17 00:00:00 2001 From: Vishesh Date: Thu, 2 Jan 2025 14:03:45 +0530 Subject: [PATCH 21/22] Address comments --- .../cloudstack/backup/NetworkerBackupProvider.java | 2 +- .../dedicated/DedicatedResourceManagerImpl.java | 2 +- .../kubernetes/cluster/StopKubernetesClusterCmd.java | 2 +- .../OpenDaylightControllerResourceManagerImpl.java | 4 ++-- .../lifecycle/AdaptiveDataStoreLifeCycleImpl.java | 12 ++++++------ .../java/com/cloud/ha/AbstractInvestigatorImpl.java | 6 +++--- 
.../ExternalLoadBalancerDeviceManagerImpl.java | 4 ++-- .../com/cloud/network/vpc/NetworkACLManagerImpl.java | 2 +- .../network/vpn/RemoteAccessVpnManagerImpl.java | 2 +- .../java/com/cloud/projects/ProjectManagerImpl.java | 2 +- .../main/java/com/cloud/server/StatsCollector.java | 2 +- .../java/com/cloud/storage/VolumeApiServiceImpl.java | 4 ++-- .../main/java/com/cloud/usage/UsageServiceImpl.java | 2 +- .../main/java/com/cloud/vm/UserVmManagerImpl.java | 2 +- .../apache/cloudstack/backup/BackupManagerImpl.java | 2 +- .../direct/download/DirectDownloadManagerImpl.java | 2 +- .../org/apache/cloudstack/ha/task/BaseHATask.java | 2 +- .../cloudstack/network/RoutedIpv4ManagerImpl.java | 2 +- .../gslb/GlobalLoadBalancingRulesServiceImpl.java | 6 +++--- .../storage/template/VnfTemplateManagerImpl.java | 6 +++--- 20 files changed, 34 insertions(+), 34 deletions(-) diff --git a/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java b/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java index 63daaa3721b8..393e2911ac38 100644 --- a/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java +++ b/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java @@ -359,7 +359,7 @@ public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup) { script.add("-v"); Date restoreJobStart = new Date(); - LOG.debug(String.format("Starting Restore for VM ID %s and %s at %s", vm, SSID, restoreJobStart)); + LOG.debug(String.format("Starting Restore for VM %s and %s at %s", vm, SSID, restoreJobStart)); if ( executeRestoreCommand(hostVO, credentials.first(), credentials.second(), script.toString()) ) { Date restoreJobEnd = new Date(); diff --git a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java 
b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java index 27b5f175d9e5..cd7dc2bbbad6 100644 --- a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java +++ b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java @@ -302,7 +302,7 @@ public List dedicatePod(final Long podId, final Long domain if (dedicatedZoneOfPod.getAccountId() != null || (accountId == null && !domainIdInChildreanList) || (accountId != null && !(dedicatedZoneOfPod.getDomainId().equals(domainId) || domainIdInChildreanList))) { DataCenterVO zone = _zoneDao.findById(pod.getDataCenterId()); - logger.error(String.format("Cannot dedicate Pod. Its zone%s is already dedicated", zone)); + logger.error(String.format("Cannot dedicate Pod. Its zone %s is already dedicated", zone)); throw new CloudRuntimeException("Pod's Zone " + zone.getName() + " is already dedicated"); } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StopKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StopKubernetesClusterCmd.java index 7da778534969..645e45a4c5f7 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StopKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StopKubernetesClusterCmd.java @@ -101,7 +101,7 @@ public void execute() throws ServerApiException, ConcurrentOperationException { try { if (!kubernetesClusterService.stopKubernetesCluster(this)) { KubernetesCluster cluster = kubernetesClusterService.findById(getId()); - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to start Kubernetes cluster %s with 
id %d", cluster, getId())); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to stop Kubernetes cluster %s with id %d", cluster, getId())); } final SuccessResponse response = new SuccessResponse(getCommandName()); setResponseObject(response); diff --git a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/agent/OpenDaylightControllerResourceManagerImpl.java b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/agent/OpenDaylightControllerResourceManagerImpl.java index a1a1fd5ea6cc..c4b3d68de181 100644 --- a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/agent/OpenDaylightControllerResourceManagerImpl.java +++ b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/agent/OpenDaylightControllerResourceManagerImpl.java @@ -106,10 +106,10 @@ public OpenDaylightControllerVO addController(AddOpenDaylightControllerCmd cmd) final PhysicalNetworkServiceProviderVO ntwkSvcProvider = physicalNetworkServiceProviderDao.findByServiceProvider(physicalNetwork.getId(), networkDevice.getNetworkServiceProvder()); if (ntwkSvcProvider == null) { - throw new CloudRuntimeException(String.format("Network Service Provider: %s is not enabled in the physical network: %sto add this device", + throw new CloudRuntimeException(String.format("Network Service Provider: %s is not enabled in the physical network: %s to add this device", networkDevice.getNetworkServiceProvder(), physicalNetwork)); } else if (ntwkSvcProvider.getState() == PhysicalNetworkServiceProvider.State.Shutdown) { - throw new CloudRuntimeException(String.format("Network Service Provider: %s is in shutdown state in the physical network: %sto add this device", + throw new CloudRuntimeException(String.format("Network Service Provider: %s is in shutdown state in the physical network: %s to add this device", 
ntwkSvcProvider.getProviderName(), physicalNetwork)); } diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java index 13d889dfc81a..771f79887e0f 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java @@ -278,19 +278,19 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { if (dataStoreVO.isManaged()) { //boolean success = false; - for (HostVO h : allHosts) { - logger.debug("adding host {} to storage pool {}", h, store); + for (HostVO host : allHosts) { + logger.debug("adding host {} to storage pool {}", host, store); } } logger.debug("In createPool Adding the pool to each of the hosts"); List poolHosts = new ArrayList(); - for (HostVO h : allHosts) { + for (HostVO host : allHosts) { try { - _storageMgr.connectHostToSharedPool(h, primarystore.getId()); - poolHosts.add(h); + _storageMgr.connectHostToSharedPool(host, primarystore.getId()); + poolHosts.add(host); } catch (Exception e) { - logger.warn("Unable to establish a connection between " + h + " and " + primarystore, e); + logger.warn("Unable to establish a connection between {} and {}", host, primarystore, e); } } diff --git a/server/src/main/java/com/cloud/ha/AbstractInvestigatorImpl.java b/server/src/main/java/com/cloud/ha/AbstractInvestigatorImpl.java index 00179103f931..4ee6adeab603 100644 --- a/server/src/main/java/com/cloud/ha/AbstractInvestigatorImpl.java +++ b/server/src/main/java/com/cloud/ha/AbstractInvestigatorImpl.java @@ -72,11 +72,11 @@ protected List findHostByPod(long podId, Long excludeHostId) { List hosts = sc.list(); List hostList = 
new ArrayList<>(hosts.size()); - for (HostVO h : hosts) { - if (excludeHostId != null && h.getId() == excludeHostId) { + for (HostVO host : hosts) { + if (excludeHostId != null && host.getId() == excludeHostId) { continue; } - hostList.add(h); + hostList.add(host); } return hostList; diff --git a/server/src/main/java/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java b/server/src/main/java/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java index d22a701d6e3b..1ded4ecedc6c 100644 --- a/server/src/main/java/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java @@ -244,9 +244,9 @@ public ExternalLoadBalancerDeviceVO addExternalLoadBalancer(long physicalNetwork ntwkSvcProvider = _physicalNetworkServiceProviderDao.findByServiceProvider(pNetwork.getId(), ntwkDevice.getNetworkServiceProvder()); if (ntwkSvcProvider == null) { - throw new CloudRuntimeException(String.format("Network Service Provider: %s is not enabled in the physical network: %sto add this device", ntwkDevice.getNetworkServiceProvder(), pNetwork)); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is not enabled in the physical network: %s to add this device", ntwkDevice.getNetworkServiceProvder(), pNetwork)); } else if (ntwkSvcProvider.getState() == PhysicalNetworkServiceProvider.State.Shutdown) { - throw new CloudRuntimeException(String.format("Network Service Provider: %s is in shutdown state in the physical network: %sto add this device", ntwkSvcProvider.getProviderName(), pNetwork)); + throw new CloudRuntimeException(String.format("Network Service Provider: %s is in shutdown state in the physical network: %s to add this device", ntwkSvcProvider.getProviderName(), pNetwork)); } if (gslbProvider) { diff --git a/server/src/main/java/com/cloud/network/vpc/NetworkACLManagerImpl.java b/server/src/main/java/com/cloud/network/vpc/NetworkACLManagerImpl.java index 
60d144b84b2b..5335b24e8971 100644 --- a/server/src/main/java/com/cloud/network/vpc/NetworkACLManagerImpl.java +++ b/server/src/main/java/com/cloud/network/vpc/NetworkACLManagerImpl.java @@ -214,7 +214,7 @@ public boolean replaceNetworkACL(final NetworkACL acl, final NetworkVO network) network.setNetworkACLId(acl.getId()); //Update Network ACL if (_networkDao.update(network.getId(), network)) { - logger.debug("Updated network: {} with Network ACL Id: {}, Applying ACL items", network, acl); + logger.debug("Updated network: {} with Network ACL: {}, Applying ACL items", network, acl); //Apply ACL to network final Boolean result = applyACLToNetwork(network.getId()); if (result) { diff --git a/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java b/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java index 63ef6bdce386..29c0106dc185 100644 --- a/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java +++ b/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java @@ -527,7 +527,7 @@ private boolean removeVpnUserWithoutRemoteAccessVpn(Account vpnOwner, String use return false; } if (!State.Revoke.equals(vpnUser.getState())) { - logger.error("VPN user with ownerId: {} and username: {} is not in revoked state, current state: {}", vpnOwner, userName, vpnUser.getState()); + logger.error("VPN user with owner: {} and username: {} is not in revoked state, current state: {}", vpnOwner, userName, vpnUser.getState()); return false; } return _vpnUsersDao.remove(vpnUser.getId()); diff --git a/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java b/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java index 300c65a98426..7a743e3ce767 100644 --- a/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java +++ b/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java @@ -1269,7 +1269,7 @@ public Boolean doInTransaction(TransactionStatus status) { }); } } else { - throw new 
InvalidParameterValueException(String.format("Unable to find invitation for account name=%s to the project id=%s", accountName, project)); + throw new InvalidParameterValueException(String.format("Unable to find invitation for account name=%s to the project=%s", accountName, project)); } return result; diff --git a/server/src/main/java/com/cloud/server/StatsCollector.java b/server/src/main/java/com/cloud/server/StatsCollector.java index be5ef08cceba..a5f91b1b3f32 100644 --- a/server/src/main/java/com/cloud/server/StatsCollector.java +++ b/server/src/main/java/com/cloud/server/StatsCollector.java @@ -1251,7 +1251,7 @@ protected void runInContext() { metrics.clear(); } } catch (Exception e) { - logger.debug("Failed to get VM stats for host with ID: {}", host); + logger.debug("Failed to get VM stats for host: {}", host); continue; } } diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 2f19dffb7b3d..2772ce0944ad 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -922,7 +922,7 @@ public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationExcept // Check if zone is disabled if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getId())) { - throw new PermissionDeniedException(String.format("Cannot perform this operation, Zone is currently disabled: %s", zone)); + throw new PermissionDeniedException(String.format("Cannot perform this operation, Zone: %s is currently disabled", zone)); } // If local storage is disabled then creation of volume with local disk @@ -3808,7 +3808,7 @@ private Snapshot orchestrateTakeVolumeSnapshot(Long volumeId, Long policyId, Lon } if (volume.getState() != Volume.State.Ready) { - throw new InvalidParameterValueException(String.format("VolumeId: %s is not in %s state but %s. 
Cannot take snapshot.", volume.getVolume(), Volume.State.Ready, volume.getState())); + throw new InvalidParameterValueException(String.format("Volume: %s is not in %s state but %s. Cannot take snapshot.", volume.getVolume(), Volume.State.Ready, volume.getState())); } boolean isSnapshotOnStorPoolOnly = volume.getStoragePoolType() == StoragePoolType.StorPool && BooleanUtils.toBoolean(_configDao.getValue("sp.bypass.secondary.storage")); diff --git a/server/src/main/java/com/cloud/usage/UsageServiceImpl.java b/server/src/main/java/com/cloud/usage/UsageServiceImpl.java index 1e60cade915c..d64a42efbecb 100644 --- a/server/src/main/java/com/cloud/usage/UsageServiceImpl.java +++ b/server/src/main/java/com/cloud/usage/UsageServiceImpl.java @@ -190,7 +190,7 @@ public Pair, Integer> getUsageRecords(ListUsageRecordsCmd //List records for all the accounts if the caller account is of type admin. //If account_id or account_name is explicitly mentioned, list records for the specified account only even if the caller is of type admin ignoreAccountId = _accountService.isRootAdmin(caller.getId()); - logger.debug("Account details not available. Using userContext accountId: {}", caller); + logger.debug("Account details not available. 
Using userContext account: {}", caller); } // Check if a domain admin is allowed to access the requested domain id diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 825da62a3c3b..f33de020446a 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -767,7 +767,7 @@ protected void runInContext() { NicVO nic = _nicDao.findById(nicId); try { - logger.debug("Trying IP retrieval [id: {}, uuid: {}, name: {}], nic {}", vmId, vmUuid, vmName, nic); + logger.debug("Trying IP retrieval for VM [id: {}, uuid: {}, name: {}], nic {}", vmId, vmUuid, vmName, nic); Answer answer = _agentMgr.send(hostId, cmd); if (answer.getResult()) { String vmIp = answer.getDetails(); diff --git a/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java b/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java index 37b1797724d1..2e52d1ccc446 100644 --- a/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java @@ -191,7 +191,7 @@ public List listBackupProviderOfferings(final Long zoneId) { throw new PermissionDeniedException("Parameter external can only be specified by a Root Admin, permission denied"); } final BackupProvider backupProvider = getBackupProvider(zoneId); - logger.debug("Listing external backup offerings for the backup provider configured for zone ID {}", dataCenterDao.findById(zoneId)); + logger.debug("Listing external backup offerings for the backup provider configured for zone {}", dataCenterDao.findById(zoneId)); return backupProvider.listBackupOfferings(zoneId); } diff --git a/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java b/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java index b3f211184265..005e24a8bce2 
100644 --- a/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java @@ -288,7 +288,7 @@ public void downloadTemplate(long templateId, long poolId, long hostId) { VMTemplateStoragePoolVO sPoolRef = vmTemplatePoolDao.findByPoolTemplate(poolId, templateId, null); if (sPoolRef == null) { if (logger.isDebugEnabled()) { - logger.debug("Not found (template:{} pool: {}) in template_spool_ref, persisting it", template, pool); + logger.debug("Not found (template: {} pool: {}) in template_spool_ref, persisting it", template, pool); } DirectDownloadAnswer ans = (DirectDownloadAnswer) answer; sPoolRef = new VMTemplateStoragePoolVO(poolId, templateId, null); diff --git a/server/src/main/java/org/apache/cloudstack/ha/task/BaseHATask.java b/server/src/main/java/org/apache/cloudstack/ha/task/BaseHATask.java index 7441d480628f..6dc7b9281ba5 100644 --- a/server/src/main/java/org/apache/cloudstack/ha/task/BaseHATask.java +++ b/server/src/main/java/org/apache/cloudstack/ha/task/BaseHATask.java @@ -100,7 +100,7 @@ public Boolean call() throws HACheckerException, HAFenceException, HARecoveryExc logger.warn("Exception occurred while running " + getTaskType() + " on a resource: " + e.getMessage(), e.getCause()); throwable = e.getCause(); } catch (TimeoutException e) { - logger.trace("{} operation timed out for resource id:{}", getTaskType(), resource); + logger.trace("{} operation timed out for resource: {}", getTaskType(), resource); } processResult(result, throwable); return result; diff --git a/server/src/main/java/org/apache/cloudstack/network/RoutedIpv4ManagerImpl.java b/server/src/main/java/org/apache/cloudstack/network/RoutedIpv4ManagerImpl.java index 1db2ad432751..50ec8a827b43 100644 --- a/server/src/main/java/org/apache/cloudstack/network/RoutedIpv4ManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/network/RoutedIpv4ManagerImpl.java @@ 
-965,7 +965,7 @@ public boolean applyRoutingFirewallRule(long id) { logger.error("Cannot apply routing firewall rule: {} as purpose {} is not {}", rule, rule.getPurpose(), FirewallRule.Purpose.Firewall); return false; } - logger.debug("Applying routing firewall rules for rule with ID: {}", rule); + logger.debug("Applying routing firewall rules for rule: {}", rule); List rules = new ArrayList<>(); rules.addAll(firewallDao.listByNetworkPurposeTrafficType(rule.getNetworkId(), rule.getPurpose(), FirewallRule.TrafficType.Egress)); rules.addAll(firewallDao.listByNetworkPurposeTrafficType(rule.getNetworkId(), rule.getPurpose(), FirewallRule.TrafficType.Ingress)); diff --git a/server/src/main/java/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java b/server/src/main/java/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java index e858e7efe4dc..a448c612ece9 100644 --- a/server/src/main/java/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java @@ -287,7 +287,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { // apply the gslb rule on to the back end gslb service providers on zones participating in gslb if (!applyGlobalLoadBalancerRuleConfig(gslbRuleId, false)) { - logger.warn("Failed to add load balancer rules {} to global load balancer rule id {}", newLbRuleIds, gslbRule); + logger.warn("Failed to add load balancer rules {} to global load balancer rule {}", newLbRuleIds, gslbRule); CloudRuntimeException ex = new CloudRuntimeException("Failed to add load balancer rules to GSLB rule "); throw ex; } @@ -390,7 +390,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { // apply the gslb rule on to the back end gslb service providers if (!applyGlobalLoadBalancerRuleConfig(gslbRuleId, false)) { - logger.warn("Failed to remove load balancer rules {} from global load balancer 
rule id {}", lbRuleIdsToremove, gslbRule); + logger.warn("Failed to remove load balancer rules {} from global load balancer rule {}", lbRuleIdsToremove, gslbRule); CloudRuntimeException ex = new CloudRuntimeException("Failed to remove load balancer rule ids from GSLB rule "); throw ex; } @@ -545,7 +545,7 @@ public GlobalLoadBalancerRule updateGlobalLoadBalancerRule(UpdateGlobalLoadBalan _gslbRuleDao.update(gslbRule.getId(), gslbRule); try { - logger.debug("Updating global load balancer with id {}", gslbRule); + logger.debug("Updating global load balancer {}", gslbRule); // apply the gslb rule on to the back end gslb service providers on zones participating in gslb applyGlobalLoadBalancerRuleConfig(gslbRuleId, false); diff --git a/server/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImpl.java b/server/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImpl.java index d13492ed2168..ef0f6f6b226d 100644 --- a/server/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImpl.java @@ -268,17 +268,17 @@ protected Map getManagementNetworkAndIp(VirtualMachineTemplate continue; } if (!networkModel.areServicesSupportedInNetwork(network.getId(), Network.Service.StaticNat)) { - logger.info("Network ID: {} does not support static nat, " + + logger.info("Network: {} does not support static nat, " + "skipping this network configuration for VNF appliance", network); continue; } if (network.getVpcId() != null) { - logger.info("Network ID: {} is a VPC tier, " + + logger.info("Network: {} is a VPC tier, " + "skipping this network configuration for VNF appliance", network); continue; } if (!networkModel.areServicesSupportedInNetwork(network.getId(), Network.Service.Firewall)) { - logger.info("Network ID: {} does not support firewall, " + + logger.info("Network: {} does not support firewall, " + "skipping this network configuration 
for VNF appliance", network); continue; } From 8af0b13942cb62379b9d93edccebc5fb1618438d Mon Sep 17 00:00:00 2001 From: Vishesh Date: Fri, 3 Jan 2025 14:11:11 +0530 Subject: [PATCH 22/22] Fix toString method for StorageFilerTO.java --- api/src/main/java/com/cloud/agent/api/to/StorageFilerTO.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/api/src/main/java/com/cloud/agent/api/to/StorageFilerTO.java b/api/src/main/java/com/cloud/agent/api/to/StorageFilerTO.java index e361e7a141fb..cbdb7922eb43 100644 --- a/api/src/main/java/com/cloud/agent/api/to/StorageFilerTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/StorageFilerTO.java @@ -19,6 +19,7 @@ import com.cloud.agent.api.LogLevel; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StoragePool; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class StorageFilerTO { long id; @@ -73,6 +74,6 @@ protected StorageFilerTO() { @Override public String toString() { - return new StringBuilder("Pool[").append(id).append("|").append(host).append(":").append(port).append("|").append(path).append("]").toString(); + return String.format("Pool %s", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "uuid", "host", "port", "path")); } }