From d161a4ec30926f1e4338b0ac97581f8a45c932d5 Mon Sep 17 00:00:00 2001 From: zzhang Date: Fri, 23 Aug 2013 15:07:39 -0700 Subject: [PATCH 001/113] [HELIX-212] Rebalancer interface should have 1 function to compute the entire ideal state, rb=13694 --- .../java/org/apache/helix/HelixProperty.java | 8 + .../controller/rebalancer/AutoRebalancer.java | 306 +++--------------- .../rebalancer/CustomRebalancer.java | 22 +- .../controller/rebalancer/Rebalancer.java | 41 +-- .../rebalancer/SemiAutoRebalancer.java | 23 +- .../util/ConstraintBasedAssignment.java | 49 +++ .../stages/BestPossibleStateCalcStage.java | 13 +- .../stages/RebalanceIdealStateStage.java | 15 +- .../controller/stages/ResourceMapping.java | 58 ---- .../strategy/AutoRebalanceStrategy.java | 117 ++++++- .../org/apache/helix/model/IdealState.java | 30 ++ .../helix/model/ResourceAssignment.java | 68 +++- .../TestCustomizedIdealStateRebalancer.java | 31 +- 13 files changed, 379 insertions(+), 402 deletions(-) delete mode 100644 helix-core/src/main/java/org/apache/helix/controller/stages/ResourceMapping.java diff --git a/helix-core/src/main/java/org/apache/helix/HelixProperty.java b/helix-core/src/main/java/org/apache/helix/HelixProperty.java index 2e1923199a..9d394008f0 100644 --- a/helix-core/src/main/java/org/apache/helix/HelixProperty.java +++ b/helix-core/src/main/java/org/apache/helix/HelixProperty.java @@ -54,6 +54,14 @@ public HelixProperty(ZNRecord record) { _record = new ZNRecord(record); } + /** + * Initialize the property by copying from another property + * @param property + */ + public HelixProperty(HelixProperty property) { + _record = new ZNRecord(property.getRecord()); + } + /** * Get the property identifier * @return the property id diff --git a/helix-core/src/main/java/org/apache/helix/controller/rebalancer/AutoRebalancer.java b/helix-core/src/main/java/org/apache/helix/controller/rebalancer/AutoRebalancer.java index 4dd5ea69f5..9564e35fb7 100644 --- 
a/helix-core/src/main/java/org/apache/helix/controller/rebalancer/AutoRebalancer.java +++ b/helix-core/src/main/java/org/apache/helix/controller/rebalancer/AutoRebalancer.java @@ -20,31 +20,27 @@ */ import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.TreeMap; import org.apache.helix.HelixManager; import org.apache.helix.ZNRecord; import org.apache.helix.controller.rebalancer.util.ConstraintBasedAssignment; import org.apache.helix.controller.stages.ClusterDataCache; import org.apache.helix.controller.stages.CurrentStateOutput; -import org.apache.helix.controller.stages.ResourceMapping; import org.apache.helix.controller.strategy.AutoRebalanceStrategy; import org.apache.helix.controller.strategy.AutoRebalanceStrategy.DefaultPlacementScheme; import org.apache.helix.controller.strategy.AutoRebalanceStrategy.ReplicaPlacementScheme; -import org.apache.helix.model.CurrentState; import org.apache.helix.model.IdealState; import org.apache.helix.model.IdealState.RebalanceMode; import org.apache.helix.model.LiveInstance; import org.apache.helix.model.Partition; import org.apache.helix.model.Resource; +import org.apache.helix.model.ResourceAssignment; import org.apache.helix.model.StateModelDefinition; import org.apache.log4j.Logger; @@ -72,8 +68,9 @@ public void init(HelixManager manager) { } @Override - public IdealState computeNewIdealState(String resourceName, IdealState currentIdealState, + public ResourceAssignment computeResourceMapping(Resource resource, IdealState currentIdealState, CurrentStateOutput currentStateOutput, ClusterDataCache clusterData) { + // Compute a preference list based on the current ideal state List partitions = new ArrayList(currentIdealState.getPartitionSet()); String stateModelName = currentIdealState.getStateModelDefRef(); StateModelDefinition 
stateModelDef = clusterData.getStateModelDef(stateModelName); @@ -81,10 +78,30 @@ public IdealState computeNewIdealState(String resourceName, IdealState currentId String replicas = currentIdealState.getReplicas(); LinkedHashMap stateCountMap = new LinkedHashMap(); - stateCountMap = stateCount(stateModelDef, liveInstance.size(), Integer.parseInt(replicas)); + stateCountMap = + ConstraintBasedAssignment.stateCount(stateModelDef, liveInstance.size(), + Integer.parseInt(replicas)); List liveNodes = new ArrayList(liveInstance.keySet()); Map> currentMapping = - currentMapping(currentStateOutput, resourceName, partitions, stateCountMap); + currentMapping(currentStateOutput, resource.getResourceName(), partitions, stateCountMap); + + // If there are nodes tagged with resource name, use only those nodes + Set taggedNodes = new HashSet(); + if (currentIdealState.getInstanceGroupTag() != null) { + for (String instanceName : liveNodes) { + if (clusterData.getInstanceConfigMap().get(instanceName) + .containsTag(currentIdealState.getInstanceGroupTag())) { + taggedNodes.add(instanceName); + } + } + } + if (taggedNodes.size() > 0) { + if (LOG.isInfoEnabled()) { + LOG.info("found the following instances with tag " + currentIdealState.getResourceName() + + " " + taggedNodes); + } + liveNodes = new ArrayList(taggedNodes); + } List allNodes = new ArrayList(clusterData.getInstanceConfigMap().keySet()); int maxPartition = currentIdealState.getMaxPartitionsPerInstance(); @@ -99,8 +116,8 @@ public IdealState computeNewIdealState(String resourceName, IdealState currentId ReplicaPlacementScheme placementScheme = new DefaultPlacementScheme(); placementScheme.init(_manager); _algorithm = - new AutoRebalanceStrategy(resourceName, partitions, stateCountMap, maxPartition, - placementScheme); + new AutoRebalanceStrategy(resource.getResourceName(), partitions, stateCountMap, + maxPartition, placementScheme); ZNRecord newMapping = _algorithm.computePartitionAssignment(liveNodes, currentMapping, 
allNodes); @@ -108,55 +125,31 @@ public IdealState computeNewIdealState(String resourceName, IdealState currentId LOG.info("newMapping: " + newMapping); } - IdealState newIdealState = new IdealState(resourceName); + IdealState newIdealState = new IdealState(resource.getResourceName()); newIdealState.getRecord().setSimpleFields(currentIdealState.getRecord().getSimpleFields()); newIdealState.setRebalanceMode(RebalanceMode.FULL_AUTO); newIdealState.getRecord().setListFields(newMapping.getListFields()); - return newIdealState; - } - - /** - * @return state count map: state->count - */ - private LinkedHashMap stateCount(StateModelDefinition stateModelDef, - int liveNodesNb, int totalReplicas) { - LinkedHashMap stateCountMap = new LinkedHashMap(); - List statesPriorityList = stateModelDef.getStatesPriorityList(); - - int replicas = totalReplicas; - for (String state : statesPriorityList) { - String num = stateModelDef.getNumInstancesPerState(state); - if ("N".equals(num)) { - stateCountMap.put(state, liveNodesNb); - } else if ("R".equals(num)) { - // wait until we get the counts for all other states - continue; - } else { - int stateCount = -1; - try { - stateCount = Integer.parseInt(num); - } catch (Exception e) { - // LOG.error("Invalid count for state: " + state + ", count: " + num + - // ", use -1 instead"); - } - if (stateCount > 0) { - stateCountMap.put(state, stateCount); - replicas -= stateCount; - } - } + // compute a full partition mapping for the resource + if (LOG.isDebugEnabled()) { + LOG.debug("Processing resource:" + resource.getResourceName()); } - - // get state count for R - for (String state : statesPriorityList) { - String num = stateModelDef.getNumInstancesPerState(state); - if ("R".equals(num)) { - stateCountMap.put(state, replicas); - // should have at most one state using R - break; - } + ResourceAssignment partitionMapping = new ResourceAssignment(resource.getResourceName()); + for (String partitionName : partitions) { + Partition partition = new 
Partition(partitionName); + Map currentStateMap = + currentStateOutput.getCurrentStateMap(resource.getResourceName(), partition); + Set disabledInstancesForPartition = + clusterData.getDisabledInstancesForPartition(partition.toString()); + List preferenceList = + ConstraintBasedAssignment.getPreferenceList(clusterData, partition, newIdealState, + stateModelDef); + Map bestStateForPartition = + ConstraintBasedAssignment.computeAutoBestStateForPartition(clusterData, stateModelDef, + preferenceList, currentStateMap, disabledInstancesForPartition); + partitionMapping.addReplicaMap(partition, bestStateForPartition); } - return stateCountMap; + return partitionMapping; } private Map> currentMapping(CurrentStateOutput currentStateOutput, @@ -186,211 +179,4 @@ private Map> currentMapping(CurrentStateOutput curre } return map; } - - @Override - public ResourceMapping computeBestPossiblePartitionState(ClusterDataCache cache, - IdealState idealState, Resource resource, CurrentStateOutput currentStateOutput) { - if (LOG.isDebugEnabled()) { - LOG.debug("Processing resource:" + resource.getResourceName()); - } - String stateModelDefName = idealState.getStateModelDefRef(); - StateModelDefinition stateModelDef = cache.getStateModelDef(stateModelDefName); - calculateAutoBalancedIdealState(cache, idealState, stateModelDef); - ResourceMapping partitionMapping = new ResourceMapping(); - for (Partition partition : resource.getPartitions()) { - Map currentStateMap = - currentStateOutput.getCurrentStateMap(resource.getResourceName(), partition); - Set disabledInstancesForPartition = - cache.getDisabledInstancesForPartition(partition.toString()); - List preferenceList = - ConstraintBasedAssignment.getPreferenceList(cache, partition, idealState, stateModelDef); - Map bestStateForPartition = - ConstraintBasedAssignment.computeAutoBestStateForPartition(cache, stateModelDef, - preferenceList, currentStateMap, disabledInstancesForPartition); - partitionMapping.addReplicaMap(partition, 
bestStateForPartition); - } - return partitionMapping; - } - - /** - * Compute best state for resource in AUTO_REBALANCE ideal state mode. the algorithm - * will make sure that the master partition are evenly distributed; Also when instances - * are added / removed, the amount of diff in master partitions are minimized - * @param cache - * @param idealState - * @param instancePreferenceList - * @param stateModelDef - * @param currentStateOutput - * @return - */ - private void calculateAutoBalancedIdealState(ClusterDataCache cache, IdealState idealState, - StateModelDefinition stateModelDef) { - String topStateValue = stateModelDef.getStatesPriorityList().get(0); - Set liveInstances = cache.getLiveInstances().keySet(); - Set taggedInstances = new HashSet(); - - // If there are instances tagged with resource name, use only those instances - if (idealState.getInstanceGroupTag() != null) { - for (String instanceName : liveInstances) { - if (cache.getInstanceConfigMap().get(instanceName) - .containsTag(idealState.getInstanceGroupTag())) { - taggedInstances.add(instanceName); - } - } - } - if (taggedInstances.size() > 0) { - if (LOG.isInfoEnabled()) { - LOG.info("found the following instances with tag " + idealState.getResourceName() + " " - + taggedInstances); - } - liveInstances = taggedInstances; - } - // Obtain replica number - int replicas = 1; - try { - replicas = Integer.parseInt(idealState.getReplicas()); - } catch (Exception e) { - LOG.error("", e); - } - // Init for all partitions with empty list - Map> defaultListFields = new TreeMap>(); - List emptyList = new ArrayList(0); - for (String partition : idealState.getPartitionSet()) { - defaultListFields.put(partition, emptyList); - } - idealState.getRecord().setListFields(defaultListFields); - // Return if no live instance - if (liveInstances.size() == 0) { - if (LOG.isInfoEnabled()) { - LOG.info("No live instances, return. 
Idealstate : " + idealState.getResourceName()); - } - return; - } - Map> masterAssignmentMap = new HashMap>(); - for (String instanceName : liveInstances) { - masterAssignmentMap.put(instanceName, new ArrayList()); - } - Set orphanedPartitions = new HashSet(); - orphanedPartitions.addAll(idealState.getPartitionSet()); - // Go through all current states and fill the assignments - for (String liveInstanceName : liveInstances) { - CurrentState currentState = - cache.getCurrentState(liveInstanceName, - cache.getLiveInstances().get(liveInstanceName).getSessionId()) - .get(idealState.getId()); - if (currentState != null) { - Map partitionStates = currentState.getPartitionStateMap(); - for (String partitionName : partitionStates.keySet()) { - String state = partitionStates.get(partitionName); - if (state.equals(topStateValue)) { - masterAssignmentMap.get(liveInstanceName).add(partitionName); - orphanedPartitions.remove(partitionName); - } - } - } - } - List orphanedPartitionsList = new ArrayList(); - orphanedPartitionsList.addAll(orphanedPartitions); - int maxPartitionsPerInstance = idealState.getMaxPartitionsPerInstance(); - normalizeAssignmentMap(masterAssignmentMap, orphanedPartitionsList, maxPartitionsPerInstance); - idealState.getRecord().setListFields( - generateListFieldFromMasterAssignment(masterAssignmentMap, replicas)); - } - - /** - * Given the current master assignment map and the partitions not hosted, generate an - * evenly distributed partition assignment map - * @param masterAssignmentMap - * current master assignment map - * @param orphanPartitions - * partitions not hosted by any instance - * @return - */ - private void normalizeAssignmentMap(Map> masterAssignmentMap, - List orphanPartitions, int maxPartitionsPerInstance) { - int totalPartitions = 0; - String[] instanceNames = new String[masterAssignmentMap.size()]; - masterAssignmentMap.keySet().toArray(instanceNames); - Arrays.sort(instanceNames); - // Find out total partition number - for (String key 
: masterAssignmentMap.keySet()) { - totalPartitions += masterAssignmentMap.get(key).size(); - Collections.sort(masterAssignmentMap.get(key)); - } - totalPartitions += orphanPartitions.size(); - - // Find out how many partitions an instance should host - int partitionNumber = totalPartitions / masterAssignmentMap.size(); - int leave = totalPartitions % masterAssignmentMap.size(); - - for (int i = 0; i < instanceNames.length; i++) { - int targetPartitionNo = leave > 0 ? (partitionNumber + 1) : partitionNumber; - leave--; - // For hosts that has more partitions, move those partitions to "orphaned" - while (masterAssignmentMap.get(instanceNames[i]).size() > targetPartitionNo) { - int lastElementIndex = masterAssignmentMap.get(instanceNames[i]).size() - 1; - orphanPartitions.add(masterAssignmentMap.get(instanceNames[i]).get(lastElementIndex)); - masterAssignmentMap.get(instanceNames[i]).remove(lastElementIndex); - } - } - leave = totalPartitions % masterAssignmentMap.size(); - Collections.sort(orphanPartitions); - // Assign "orphaned" partitions to hosts that do not have enough partitions - for (int i = 0; i < instanceNames.length; i++) { - int targetPartitionNo = leave > 0 ? 
(partitionNumber + 1) : partitionNumber; - leave--; - if (targetPartitionNo > maxPartitionsPerInstance) { - targetPartitionNo = maxPartitionsPerInstance; - } - while (masterAssignmentMap.get(instanceNames[i]).size() < targetPartitionNo) { - int lastElementIndex = orphanPartitions.size() - 1; - masterAssignmentMap.get(instanceNames[i]).add(orphanPartitions.get(lastElementIndex)); - orphanPartitions.remove(lastElementIndex); - } - } - if (orphanPartitions.size() > 0) { - LOG.warn("orphanPartitions still contains elements"); - } - } - - /** - * Generate full preference list from the master assignment map evenly distribute the - * slave partitions mastered on a host to other hosts - * @param masterAssignmentMap - * current master assignment map - * @param orphanPartitions - * partitions not hosted by any instance - * @return - */ - private Map> generateListFieldFromMasterAssignment( - Map> masterAssignmentMap, int replicas) { - Map> listFields = new HashMap>(); - int slaves = replicas - 1; - String[] instanceNames = new String[masterAssignmentMap.size()]; - masterAssignmentMap.keySet().toArray(instanceNames); - Arrays.sort(instanceNames); - - for (int i = 0; i < instanceNames.length; i++) { - String instanceName = instanceNames[i]; - List otherInstances = new ArrayList(masterAssignmentMap.size() - 1); - for (int x = 0; x < instanceNames.length - 1; x++) { - int index = (x + i + 1) % instanceNames.length; - otherInstances.add(instanceNames[index]); - } - - List partitionList = masterAssignmentMap.get(instanceName); - for (int j = 0; j < partitionList.size(); j++) { - String partitionName = partitionList.get(j); - listFields.put(partitionName, new ArrayList()); - listFields.get(partitionName).add(instanceName); - - int slavesCanAssign = Math.min(slaves, otherInstances.size()); - for (int k = 0; k < slavesCanAssign; k++) { - int index = (j + k + 1) % otherInstances.size(); - listFields.get(partitionName).add(otherInstances.get(index)); - } - } - } - return listFields; - } 
} diff --git a/helix-core/src/main/java/org/apache/helix/controller/rebalancer/CustomRebalancer.java b/helix-core/src/main/java/org/apache/helix/controller/rebalancer/CustomRebalancer.java index 17dc5c80cc..8557fa0d8c 100644 --- a/helix-core/src/main/java/org/apache/helix/controller/rebalancer/CustomRebalancer.java +++ b/helix-core/src/main/java/org/apache/helix/controller/rebalancer/CustomRebalancer.java @@ -27,11 +27,11 @@ import org.apache.helix.HelixManager; import org.apache.helix.controller.stages.ClusterDataCache; import org.apache.helix.controller.stages.CurrentStateOutput; -import org.apache.helix.controller.stages.ResourceMapping; import org.apache.helix.model.IdealState; import org.apache.helix.model.LiveInstance; import org.apache.helix.model.Partition; import org.apache.helix.model.Resource; +import org.apache.helix.model.ResourceAssignment; import org.apache.helix.model.StateModelDefinition; import org.apache.log4j.Logger; @@ -53,29 +53,23 @@ public void init(HelixManager manager) { } @Override - public IdealState computeNewIdealState(String resourceName, IdealState currentIdealState, + public ResourceAssignment computeResourceMapping(Resource resource, IdealState currentIdealState, CurrentStateOutput currentStateOutput, ClusterDataCache clusterData) { - return currentIdealState; - } - - @Override - public ResourceMapping computeBestPossiblePartitionState(ClusterDataCache cache, - IdealState idealState, Resource resource, CurrentStateOutput currentStateOutput) { - String stateModelDefName = idealState.getStateModelDefRef(); - StateModelDefinition stateModelDef = cache.getStateModelDef(stateModelDefName); + String stateModelDefName = currentIdealState.getStateModelDefRef(); + StateModelDefinition stateModelDef = clusterData.getStateModelDef(stateModelDefName); if (LOG.isDebugEnabled()) { LOG.debug("Processing resource:" + resource.getResourceName()); } - ResourceMapping partitionMapping = new ResourceMapping(); + ResourceAssignment partitionMapping = 
new ResourceAssignment(resource.getResourceName()); for (Partition partition : resource.getPartitions()) { Map currentStateMap = currentStateOutput.getCurrentStateMap(resource.getResourceName(), partition); Set disabledInstancesForPartition = - cache.getDisabledInstancesForPartition(partition.toString()); + clusterData.getDisabledInstancesForPartition(partition.toString()); Map idealStateMap = - idealState.getInstanceStateMap(partition.getPartitionName()); + currentIdealState.getInstanceStateMap(partition.getPartitionName()); Map bestStateForPartition = - computeCustomizedBestStateForPartition(cache, stateModelDef, idealStateMap, + computeCustomizedBestStateForPartition(clusterData, stateModelDef, idealStateMap, currentStateMap, disabledInstancesForPartition); partitionMapping.addReplicaMap(partition, bestStateForPartition); } diff --git a/helix-core/src/main/java/org/apache/helix/controller/rebalancer/Rebalancer.java b/helix-core/src/main/java/org/apache/helix/controller/rebalancer/Rebalancer.java index a0cfbb77b6..e4f165bf27 100644 --- a/helix-core/src/main/java/org/apache/helix/controller/rebalancer/Rebalancer.java +++ b/helix-core/src/main/java/org/apache/helix/controller/rebalancer/Rebalancer.java @@ -22,9 +22,9 @@ import org.apache.helix.HelixManager; import org.apache.helix.controller.stages.ClusterDataCache; import org.apache.helix.controller.stages.CurrentStateOutput; -import org.apache.helix.controller.stages.ResourceMapping; import org.apache.helix.model.IdealState; import org.apache.helix.model.Resource; +import org.apache.helix.model.ResourceAssignment; /** * Allows one to come up with custom implementation of a rebalancer.
@@ -32,33 +32,24 @@ * Simply return the newIdealState for a resource in this method.
*/ public interface Rebalancer { - void init(HelixManager manager); - /** - * This method provides all the relevant information needed to rebalance a resource. - * If you need additional information use manager.getAccessor to read the cluster data. - * This allows one to compute the newIdealState according to app specific requirement. - * @param resourceName Name of the resource to be rebalanced - * @param currentIdealState - * @param currentStateOutput - * Provides the current state and pending state transition for all - * partitions - * @param clusterData Provides additional methods to retrieve cluster data. - * @return + * Initialize the rebalancer with a HelixManager if necessary + * @param manager */ - IdealState computeNewIdealState(String resourceName, IdealState currentIdealState, - final CurrentStateOutput currentStateOutput, final ClusterDataCache clusterData); + void init(HelixManager manager); /** - * Given an ideal state for a resource and the liveness of instances, compute the best possible - * state assignment for each partition's replicas. - * @param cache - * @param idealState - * @param resource - * @param currentStateOutput - * Provides the current state and pending state transitions for all partitions - * @return + * Given an ideal state for a resource and liveness of instances, compute a assignment of + * instances and states to each partition of a resource. This method provides all the relevant + * information needed to rebalance a resource. If you need additional information use + * manager.getAccessor to read the cluster data. This allows one to compute the newIdealState + * according to app specific requirements. 
+ * @param resourceName the resource for which a mapping will be computed + * @param currentIdealState the IdealState that corresponds to this resource + * @param currentStateOutput the current states of all partitions + * @param clusterData cache of the cluster state */ - ResourceMapping computeBestPossiblePartitionState(ClusterDataCache cache, IdealState idealState, - Resource resource, CurrentStateOutput currentStateOutput); + ResourceAssignment computeResourceMapping(final Resource resource, + final IdealState currentIdealState, final CurrentStateOutput currentStateOutput, + final ClusterDataCache clusterData); } diff --git a/helix-core/src/main/java/org/apache/helix/controller/rebalancer/SemiAutoRebalancer.java b/helix-core/src/main/java/org/apache/helix/controller/rebalancer/SemiAutoRebalancer.java index bc682ff8a6..b096817d51 100644 --- a/helix-core/src/main/java/org/apache/helix/controller/rebalancer/SemiAutoRebalancer.java +++ b/helix-core/src/main/java/org/apache/helix/controller/rebalancer/SemiAutoRebalancer.java @@ -27,10 +27,10 @@ import org.apache.helix.controller.rebalancer.util.ConstraintBasedAssignment; import org.apache.helix.controller.stages.ClusterDataCache; import org.apache.helix.controller.stages.CurrentStateOutput; -import org.apache.helix.controller.stages.ResourceMapping; import org.apache.helix.model.IdealState; import org.apache.helix.model.Partition; import org.apache.helix.model.Resource; +import org.apache.helix.model.ResourceAssignment; import org.apache.helix.model.StateModelDefinition; import org.apache.log4j.Logger; @@ -52,29 +52,24 @@ public void init(HelixManager manager) { } @Override - public IdealState computeNewIdealState(String resourceName, IdealState currentIdealState, + public ResourceAssignment computeResourceMapping(Resource resource, IdealState currentIdealState, CurrentStateOutput currentStateOutput, ClusterDataCache clusterData) { - return currentIdealState; - } - - @Override - public ResourceMapping 
computeBestPossiblePartitionState(ClusterDataCache cache, - IdealState idealState, Resource resource, CurrentStateOutput currentStateOutput) { - String stateModelDefName = idealState.getStateModelDefRef(); - StateModelDefinition stateModelDef = cache.getStateModelDef(stateModelDefName); + String stateModelDefName = currentIdealState.getStateModelDefRef(); + StateModelDefinition stateModelDef = clusterData.getStateModelDef(stateModelDefName); if (LOG.isDebugEnabled()) { LOG.debug("Processing resource:" + resource.getResourceName()); } - ResourceMapping partitionMapping = new ResourceMapping(); + ResourceAssignment partitionMapping = new ResourceAssignment(resource.getResourceName()); for (Partition partition : resource.getPartitions()) { Map currentStateMap = currentStateOutput.getCurrentStateMap(resource.getResourceName(), partition); Set disabledInstancesForPartition = - cache.getDisabledInstancesForPartition(partition.toString()); + clusterData.getDisabledInstancesForPartition(partition.toString()); List preferenceList = - ConstraintBasedAssignment.getPreferenceList(cache, partition, idealState, stateModelDef); + ConstraintBasedAssignment.getPreferenceList(clusterData, partition, currentIdealState, + stateModelDef); Map bestStateForPartition = - ConstraintBasedAssignment.computeAutoBestStateForPartition(cache, stateModelDef, + ConstraintBasedAssignment.computeAutoBestStateForPartition(clusterData, stateModelDef, preferenceList, currentStateMap, disabledInstancesForPartition); partitionMapping.addReplicaMap(partition, bestStateForPartition); } diff --git a/helix-core/src/main/java/org/apache/helix/controller/rebalancer/util/ConstraintBasedAssignment.java b/helix-core/src/main/java/org/apache/helix/controller/rebalancer/util/ConstraintBasedAssignment.java index 3fd52f4d96..d2dbdef995 100644 --- a/helix-core/src/main/java/org/apache/helix/controller/rebalancer/util/ConstraintBasedAssignment.java +++ 
b/helix-core/src/main/java/org/apache/helix/controller/rebalancer/util/ConstraintBasedAssignment.java @@ -23,6 +23,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -139,4 +140,52 @@ public static Map computeAutoBestStateForPartition(ClusterDataCa } return instanceStateMap; } + + /** + * Get the number of replicas that should be in each state for a partition + * @param stateModelDef StateModelDefinition object + * @param liveNodesNb number of live nodes + * @param total number of replicas + * @return state count map: state->count + */ + public static LinkedHashMap stateCount(StateModelDefinition stateModelDef, + int liveNodesNb, int totalReplicas) { + LinkedHashMap stateCountMap = new LinkedHashMap(); + List statesPriorityList = stateModelDef.getStatesPriorityList(); + + int replicas = totalReplicas; + for (String state : statesPriorityList) { + String num = stateModelDef.getNumInstancesPerState(state); + if ("N".equals(num)) { + stateCountMap.put(state, liveNodesNb); + } else if ("R".equals(num)) { + // wait until we get the counts for all other states + continue; + } else { + int stateCount = -1; + try { + stateCount = Integer.parseInt(num); + } catch (Exception e) { + // LOG.error("Invalid count for state: " + state + ", count: " + num + + // ", use -1 instead"); + } + + if (stateCount > 0) { + stateCountMap.put(state, stateCount); + replicas -= stateCount; + } + } + } + + // get state count for R + for (String state : statesPriorityList) { + String num = stateModelDef.getNumInstancesPerState(state); + if ("R".equals(num)) { + stateCountMap.put(state, replicas); + // should have at most one state using R + break; + } + } + return stateCountMap; + } } diff --git a/helix-core/src/main/java/org/apache/helix/controller/stages/BestPossibleStateCalcStage.java 
b/helix-core/src/main/java/org/apache/helix/controller/stages/BestPossibleStateCalcStage.java index 598c3183d8..e812e16700 100644 --- a/helix-core/src/main/java/org/apache/helix/controller/stages/BestPossibleStateCalcStage.java +++ b/helix-core/src/main/java/org/apache/helix/controller/stages/BestPossibleStateCalcStage.java @@ -21,6 +21,7 @@ import java.util.Map; +import org.apache.helix.HelixManager; import org.apache.helix.controller.pipeline.AbstractBaseStage; import org.apache.helix.controller.pipeline.StageException; import org.apache.helix.controller.rebalancer.AutoRebalancer; @@ -28,9 +29,10 @@ import org.apache.helix.controller.rebalancer.Rebalancer; import org.apache.helix.controller.rebalancer.SemiAutoRebalancer; import org.apache.helix.model.IdealState; +import org.apache.helix.model.IdealState.RebalanceMode; import org.apache.helix.model.Partition; import org.apache.helix.model.Resource; -import org.apache.helix.model.IdealState.RebalanceMode; +import org.apache.helix.model.ResourceAssignment; import org.apache.helix.util.HelixUtil; import org.apache.log4j.Logger; @@ -112,11 +114,12 @@ private BestPossibleStateOutput compute(ClusterEvent event, Map newStateMap = partitionStateAssignment.getInstanceStateMap(partition); + Map newStateMap = partitionStateAssignment.getReplicaMap(partition); output.setState(resourceName, partition, newStateMap); } } diff --git a/helix-core/src/main/java/org/apache/helix/controller/stages/RebalanceIdealStateStage.java b/helix-core/src/main/java/org/apache/helix/controller/stages/RebalanceIdealStateStage.java index cf1633cd49..d82ee2f2af 100644 --- a/helix-core/src/main/java/org/apache/helix/controller/stages/RebalanceIdealStateStage.java +++ b/helix-core/src/main/java/org/apache/helix/controller/stages/RebalanceIdealStateStage.java @@ -27,6 +27,8 @@ import org.apache.helix.controller.rebalancer.Rebalancer; import org.apache.helix.model.IdealState; import org.apache.helix.model.IdealState.RebalanceMode; +import 
org.apache.helix.model.Resource; +import org.apache.helix.model.ResourceAssignment; import org.apache.helix.util.HelixUtil; import org.apache.log4j.Logger; @@ -57,10 +59,15 @@ public void process(ClusterEvent event) throws Exception { Rebalancer balancer = (Rebalancer) (HelixUtil.loadClass(getClass(), rebalancerClassName).newInstance()); balancer.init(manager); - IdealState newIdealState = - balancer.computeNewIdealState(resourceName, idealStateMap.get(resourceName), - currentStateOutput, cache); - updatedIdealStates.put(resourceName, newIdealState); + Resource resource = new Resource(resourceName); + for (String partitionName : currentIdealState.getPartitionSet()) { + resource.addPartition(partitionName); + } + ResourceAssignment resourceAssignment = + balancer.computeResourceMapping(resource, currentIdealState, currentStateOutput, + cache); + currentIdealState.updateFromAssignment(resourceAssignment); + updatedIdealStates.put(resourceName, currentIdealState); } catch (Exception e) { LOG.error("Exception while invoking custom rebalancer class:" + rebalancerClassName, e); } diff --git a/helix-core/src/main/java/org/apache/helix/controller/stages/ResourceMapping.java b/helix-core/src/main/java/org/apache/helix/controller/stages/ResourceMapping.java deleted file mode 100644 index 2609791846..0000000000 --- a/helix-core/src/main/java/org/apache/helix/controller/stages/ResourceMapping.java +++ /dev/null @@ -1,58 +0,0 @@ -package org.apache.helix.controller.stages; - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -import org.apache.helix.model.Partition; - -/** - * Represents the assignments of replicas for an entire resource, keyed on partitions of the - * resource. Each partition has its replicas assigned to a node, and each replica is in a state. - */ -public class ResourceMapping { - - private final Map> _resourceMap; - - public ResourceMapping() { - this(new HashMap>()); - } - - public ResourceMapping(Map> resourceMap) { - _resourceMap = resourceMap; - } - - public Map> getResourceMap() { - return _resourceMap; - } - - public Map getInstanceStateMap(Partition partition) { - if (_resourceMap.containsKey(partition)) { - return _resourceMap.get(partition); - } - return Collections.emptyMap(); - } - - public void addReplicaMap(Partition partition, Map replicaMap) { - _resourceMap.put(partition, replicaMap); - } -} diff --git a/helix-core/src/main/java/org/apache/helix/controller/strategy/AutoRebalanceStrategy.java b/helix-core/src/main/java/org/apache/helix/controller/strategy/AutoRebalanceStrategy.java index 76560a4b82..72046bf22f 100644 --- a/helix-core/src/main/java/org/apache/helix/controller/strategy/AutoRebalanceStrategy.java +++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/AutoRebalanceStrategy.java @@ -24,6 +24,7 @@ import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -54,6 +55,15 @@ public class 
AutoRebalanceStrategy { private Map _existingNonPreferredAssignment; private Set _orphaned; + /** + * Initialize this strategy for a resource + * @param resourceName the resource for which an assignment will be computed + * @param partitions the partition names for the resource + * @param states the states and the number of replicas that should be in each state + * @param maximumPerNode the maximum number of replicas any node can hold + * @param placementScheme the scheme to use for preferred replica locations. If null, this is + * {@link DefaultPlacementScheme} + */ public AutoRebalanceStrategy(String resourceName, final List partitions, final LinkedHashMap states, int maximumPerNode, ReplicaPlacementScheme placementScheme) { @@ -68,11 +78,22 @@ public AutoRebalanceStrategy(String resourceName, final List partitions, } } + /** + * Initialize the strategy with a default placement scheme and no limit on the number of + * replicas per node + * @see #AutoRebalanceStrategy(String, List, LinkedHashMap, int, ReplicaPlacementScheme) + */ public AutoRebalanceStrategy(String resourceName, final List partitions, final LinkedHashMap states) { this(resourceName, partitions, states, Integer.MAX_VALUE, new DefaultPlacementScheme()); } + /** + * Determine a preference list and mapping of partitions to nodes for all replicas + * @param liveNodes the current list of live participants + * @param currentMapping the current assignment of replicas to nodes + * @param allNodes the full list of known nodes in the system + * @return the preference list and replica mapping + */ public ZNRecord computePartitionAssignment(final List liveNodes, final Map> currentMapping, final List allNodes) { int numReplicas = countStateReplicas(); @@ -276,15 +297,6 @@ private void prepareResult(ZNRecord znRecord) { znRecord.setMapField(partition, new TreeMap()); znRecord.setListField(partition, new ArrayList()); } - for (Node node : _liveNodesList) { - for (Replica replica : node.preferred) { - znRecord.getMapField(replica.partition).put(node.id,
_stateMap.get(replica.replicaId)); - } - for (Replica replica : node.nonPreferred) { - znRecord.getMapField(replica.partition).put(node.id, _stateMap.get(replica.replicaId)); - } - } - int count = countStateReplicas(); for (int replicaId = 0; replicaId < count; replicaId++) { for (Node node : _liveNodesList) { @@ -300,6 +312,93 @@ private void prepareResult(ZNRecord znRecord) { } } } + normalizePreferenceLists(znRecord.getListFields()); + + for (Node node : _liveNodesList) { + for (Replica replica : node.preferred) { + znRecord.getMapField(replica.partition).put(node.id, _stateMap.get(replica.replicaId)); + } + for (Replica replica : node.nonPreferred) { + znRecord.getMapField(replica.partition).put(node.id, _stateMap.get(replica.replicaId)); + } + } + } + + /** + * Adjust preference lists to reduce the number of same replicas on an instance + * @param preferenceLists map of (partition --> list of nodes) + */ + private void normalizePreferenceLists(Map> preferenceLists) { + Map> nodeReplicaCounts = + new HashMap>(); + for (String partition : preferenceLists.keySet()) { + normalizePreferenceList(preferenceLists.get(partition), nodeReplicaCounts); + } + } + + /** + * Adjust a single preference list for replica assignment imbalance + * @param preferenceList list of node names + * @param nodeReplicaCounts map of (node --> replica id --> count) + */ + private void normalizePreferenceList(List preferenceList, + Map> nodeReplicaCounts) { + Set notAssigned = new LinkedHashSet(preferenceList); + List newPreferenceList = new ArrayList(); + int replicas = Math.min(countStateReplicas(), preferenceList.size()); + for (int i = 0; i < replicas; i++) { + String node = getMinimumNodeForReplica(i, notAssigned, nodeReplicaCounts); + newPreferenceList.add(node); + notAssigned.remove(node); + Map counts = nodeReplicaCounts.get(node); + counts.put(i, counts.get(i) + 1); + } + preferenceList.clear(); + preferenceList.addAll(newPreferenceList); + } + + /** + * Get the node which hosts the 
fewest of a given replica + * @param replicaId the replica + * @param nodes nodes to check + * @param nodeReplicaCounts current assignment of replicas + * @return the node most willing to accept the replica + */ + private String getMinimumNodeForReplica(int replicaId, Set nodes, + Map> nodeReplicaCounts) { + String minimalNode = null; + int minimalCount = Integer.MAX_VALUE; + for (String node : nodes) { + int count = getReplicaCountForNode(replicaId, node, nodeReplicaCounts); + if (count < minimalCount) { + minimalCount = count; + minimalNode = node; + } + } + return minimalNode; + } + + /** + * Safe check for the number of replicas of a given id assigned to a node + * @param replicaId the replica to assign + * @param node the node to check + * @param nodeReplicaCounts a map of node to replica id and counts + * @return the number of currently assigned replicas of the given id + */ + private int getReplicaCountForNode(int replicaId, String node, + Map> nodeReplicaCounts) { + if (!nodeReplicaCounts.containsKey(node)) { + Map replicaCounts = new HashMap(); + replicaCounts.put(replicaId, 0); + nodeReplicaCounts.put(node, replicaCounts); + return 0; + } + Map replicaCounts = nodeReplicaCounts.get(node); + if (!replicaCounts.containsKey(replicaId)) { + replicaCounts.put(replicaId, 0); + return 0; + } + return replicaCounts.get(replicaId); } /** diff --git a/helix-core/src/main/java/org/apache/helix/model/IdealState.java b/helix-core/src/main/java/org/apache/helix/model/IdealState.java index 463369a4ee..59bc59c608 100644 --- a/helix-core/src/main/java/org/apache/helix/model/IdealState.java +++ b/helix-core/src/main/java/org/apache/helix/model/IdealState.java @@ -32,6 +32,8 @@ import org.apache.helix.ZNRecord; import org.apache.log4j.Logger; +import org.apache.helix.controller.rebalancer.Rebalancer; + /** * The ideal states of all partitions in a resource */ @@ -216,6 +218,15 @@ public Set getPartitionSet() { } } + /** + * Set the current mapping of a partition + * @param
partition the partition to set + * @param instanceStateMap (instance name, state) pairs + */ + public void setInstanceStateMap(String partition, Map instanceStateMap) { + _record.setMapField(partition, instanceStateMap); + } + /** * Get the current mapping of a partition * @param partitionName the name of the partition @@ -256,6 +267,15 @@ public Set getInstanceSet(String partitionName) { } + /** + * Set the preference list of a partition + * @param partitionName the name of the partition to set + * @param preferenceList a list of instances that can serve replicas of the partition + */ + public void setPreferenceList(String partitionName, List preferenceList) { + _record.setListField(partitionName, preferenceList); + } + /** * Get the preference list of a partition * @param partitionName the name of the partition @@ -443,6 +463,16 @@ public String getInstanceGroupTag() { return _record.getSimpleField(IdealStateProperty.INSTANCE_GROUP_TAG.toString()); } + public void updateFromAssignment(ResourceAssignment assignment) { + _record.getMapFields().clear(); + _record.getListFields().clear(); + for (Partition partition : assignment.getMappedPartitions()) { + Map replicaMap = assignment.getReplicaMap(partition); + setInstanceStateMap(partition.getPartitionName(), replicaMap); + setPreferenceList(partition.getPartitionName(), new ArrayList(replicaMap.keySet())); + } + } + private RebalanceMode normalizeRebalanceMode(IdealStateModeProperty mode) { RebalanceMode property; switch (mode) { diff --git a/helix-core/src/main/java/org/apache/helix/model/ResourceAssignment.java b/helix-core/src/main/java/org/apache/helix/model/ResourceAssignment.java index b0d7f1fdc8..2b3d14dfc6 100644 --- a/helix-core/src/main/java/org/apache/helix/model/ResourceAssignment.java +++ b/helix-core/src/main/java/org/apache/helix/model/ResourceAssignment.java @@ -19,6 +19,72 @@ * under the License. 
*/ -public class ResourceAssignment { +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import org.apache.helix.HelixProperty; + +/** + * Represents the assignments of replicas for an entire resource, keyed on partitions of the + * resource. Each partition has its replicas assigned to a node, and each replica is in a state. + * For example, if there is a partition p with 2 replicas, a valid assignment is:
+ *
+ * p: {(n1, s1), (n2, s2)}
+ *
+ * This means one replica of p is located at node n1 and is in state s1, and another is in node n2 + * and is in state s2. n1 cannot be equal to n2, but s1 can be equal to s2 if at least two replicas + * can be in s1. + */ +public class ResourceAssignment extends HelixProperty { + + /** + * Initialize an empty mapping + * @param resourceName the resource being mapped + */ + public ResourceAssignment(String resourceName) { + super(resourceName); + } + + /** + * Initialize a mapping from an existing ResourceAssignment + * @param existingMapping pre-populated ResourceAssignment + */ + public ResourceAssignment(ResourceAssignment existingMapping) { + super(existingMapping); + } + + /** + * Get the currently mapped partitions + * @return list of Partition objects + */ + public List getMappedPartitions() { + List partitions = new ArrayList(); + for (String partitionName : _record.getMapFields().keySet()) { + partitions.add(new Partition(partitionName)); + } + return partitions; + } + + /** + * Get the instance, state pairs for a partition + * @param partition the Partition to look up + * @return map of (instance name, state) + */ + public Map getReplicaMap(Partition partition) { + if (_record.getMapFields().containsKey(partition.getPartitionName())) { + return _record.getMapField(partition.getPartitionName()); + } + return Collections.emptyMap(); + } + + /** + * Add instance, state pairs for a partition + * @param partition the partition to set + * @param replicaMap map of (instance name, state) + */ + public void addReplicaMap(Partition partition, Map replicaMap) { + _record.setMapField(partition.getPartitionName(), replicaMap); + } } diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestCustomizedIdealStateRebalancer.java b/helix-core/src/test/java/org/apache/helix/integration/TestCustomizedIdealStateRebalancer.java index 7811c0d3b9..70ff6bd029 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestCustomizedIdealStateRebalancer.java +++
b/helix-core/src/test/java/org/apache/helix/integration/TestCustomizedIdealStateRebalancer.java @@ -24,9 +24,9 @@ import org.apache.helix.HelixDataAccessor; import org.apache.helix.HelixManager; -import org.apache.helix.ZNRecord; import org.apache.helix.PropertyKey.Builder; -import org.apache.helix.controller.rebalancer.SemiAutoRebalancer; +import org.apache.helix.ZNRecord; +import org.apache.helix.controller.rebalancer.Rebalancer; import org.apache.helix.controller.stages.ClusterDataCache; import org.apache.helix.controller.stages.CurrentStateOutput; import org.apache.helix.manager.zk.ZKHelixDataAccessor; @@ -36,6 +36,9 @@ import org.apache.helix.model.IdealState; import org.apache.helix.model.IdealState.IdealStateProperty; import org.apache.helix.model.IdealState.RebalanceMode; +import org.apache.helix.model.Partition; +import org.apache.helix.model.Resource; +import org.apache.helix.model.ResourceAssignment; import org.apache.helix.tools.ClusterStateVerifier; import org.apache.helix.tools.ClusterStateVerifier.ZkVerifier; import org.testng.Assert; @@ -46,7 +49,7 @@ public class TestCustomizedIdealStateRebalancer extends String db2 = TEST_DB + "2"; static boolean testRebalancerCreated = false; - public static class TestRebalancer extends SemiAutoRebalancer { + public static class TestRebalancer implements Rebalancer { @Override public void init(HelixManager manager) { @@ -54,18 +57,22 @@ public void init(HelixManager manager) { } @Override - public IdealState computeNewIdealState(String resourceName, IdealState currentIdealState, + public ResourceAssignment computeResourceMapping(Resource resource, IdealState currentIdealState, CurrentStateOutput currentStateOutput, ClusterDataCache clusterData) { - for (String partition : currentIdealState.getPartitionSet()) { - String instance = currentIdealState.getPreferenceList(partition).get(0); - currentIdealState.getPreferenceList(partition).clear(); - currentIdealState.getPreferenceList(partition).add(instance); - - 
currentIdealState.getInstanceStateMap(partition).clear(); - currentIdealState.getInstanceStateMap(partition).put(instance, "MASTER"); + ResourceAssignment resourceMapping = new ResourceAssignment(resource.getResourceName()); + for (Partition partition : resource.getPartitions()) { + String partitionName = partition.getPartitionName(); + String instance = currentIdealState.getPreferenceList(partitionName).get(0); + currentIdealState.getPreferenceList(partitionName).clear(); + currentIdealState.getPreferenceList(partitionName).add(instance); + + currentIdealState.getInstanceStateMap(partitionName).clear(); + currentIdealState.getInstanceStateMap(partitionName).put(instance, "MASTER"); + resourceMapping.addReplicaMap(partition, + currentIdealState.getInstanceStateMap(partitionName)); } currentIdealState.setReplicas("1"); - return currentIdealState; + return resourceMapping; } } From e5f2ef7efb88d6781ef3351c0aebbf155c760b01 Mon Sep 17 00:00:00 2001 From: Kishore Gopalakrishna Date: Mon, 26 Aug 2013 08:14:50 -0700 Subject: [PATCH 002/113] [HELIX-224] Moving examples package to separate module helix-examples --- helix-core/pom.xml | 8 - helix-examples/.gitignore | 16 + helix-examples/DISCLAIMER | 15 + helix-examples/LICENSE | 273 ++++++++++++++++++ helix-examples/NOTICE | 30 ++ helix-examples/pom.xml | 104 +++++++ helix-examples/src/assemble/assembly.xml | 60 ++++ .../helix/examples/BootstrapHandler.java | 0 .../helix/examples/BootstrapProcess.java | 0 .../helix/examples/DummyParticipant.java | 0 .../apache/helix/examples/ExampleHelper.java | 0 .../apache/helix/examples/ExampleProcess.java | 0 .../examples/IdealStateBuilderExample.java | 0 .../helix/examples/IdealStateExample.java | 0 .../LeaderStandbyStateModelFactory.java | 0 .../MasterSlaveStateModelFactory.java | 0 .../OnlineOfflineStateModelFactory.java | 0 .../org/apache/helix/examples/Quickstart.java | 0 .../apache/helix/examples/package-info.java | 0 helix-examples/src/test/conf/testng.xml | 27 ++ pom.xml | 1 + 
src/site/markdown/Quickstart.md | 1 + 22 files changed, 527 insertions(+), 8 deletions(-) create mode 100644 helix-examples/.gitignore create mode 100644 helix-examples/DISCLAIMER create mode 100644 helix-examples/LICENSE create mode 100644 helix-examples/NOTICE create mode 100644 helix-examples/pom.xml create mode 100644 helix-examples/src/assemble/assembly.xml rename {helix-core => helix-examples}/src/main/java/org/apache/helix/examples/BootstrapHandler.java (100%) rename {helix-core => helix-examples}/src/main/java/org/apache/helix/examples/BootstrapProcess.java (100%) rename {helix-core => helix-examples}/src/main/java/org/apache/helix/examples/DummyParticipant.java (100%) rename {helix-core => helix-examples}/src/main/java/org/apache/helix/examples/ExampleHelper.java (100%) rename {helix-core => helix-examples}/src/main/java/org/apache/helix/examples/ExampleProcess.java (100%) rename {helix-core => helix-examples}/src/main/java/org/apache/helix/examples/IdealStateBuilderExample.java (100%) rename {helix-core => helix-examples}/src/main/java/org/apache/helix/examples/IdealStateExample.java (100%) rename {helix-core => helix-examples}/src/main/java/org/apache/helix/examples/LeaderStandbyStateModelFactory.java (100%) rename {helix-core => helix-examples}/src/main/java/org/apache/helix/examples/MasterSlaveStateModelFactory.java (100%) rename {helix-core => helix-examples}/src/main/java/org/apache/helix/examples/OnlineOfflineStateModelFactory.java (100%) rename {helix-core => helix-examples}/src/main/java/org/apache/helix/examples/Quickstart.java (100%) rename {helix-core => helix-examples}/src/main/java/org/apache/helix/examples/package-info.java (100%) create mode 100644 helix-examples/src/test/conf/testng.xml diff --git a/helix-core/pom.xml b/helix-core/pom.xml index e57c3ffef9..af04d858c4 100644 --- a/helix-core/pom.xml +++ b/helix-core/pom.xml @@ -201,10 +201,6 @@ under the License. 
org.apache.helix.tools.ZkLogCSVFormatter zk-log-csv-formatter - - org.apache.helix.examples.ExampleProcess - start-helix-participant - org.apache.helix.tools.LocalZKServer start-standalone-zookeeper @@ -213,10 +209,6 @@ under the License. org.apache.helix.tools.ZkLogAnalyzer zk-log-analyzer - - org.apache.helix.examples.Quickstart - quickstart - org.apache.helix.tools.JmxDumper JmxDumper diff --git a/helix-examples/.gitignore b/helix-examples/.gitignore new file mode 100644 index 0000000000..2411bd8ac3 --- /dev/null +++ b/helix-examples/.gitignore @@ -0,0 +1,16 @@ +/target +/.project +/.classpath +/.settings +/zkdata +/test-output +/src/main/scripts/integration-test/var +#/src/test/java/com/linkedin/dds/ +#/src/main/scripts/integration-test/config +/src/main/scripts/target/ +/src/main/scripts/integration-test/script/.metadata_infra +#/src/main/scripts/integration-test/script/dds_driver.py +#/src/main/scripts/integration-test/script/pexpect.py +#/src/main/scripts/integration-test/script/utility.py +*.pyc +/bin diff --git a/helix-examples/DISCLAIMER b/helix-examples/DISCLAIMER new file mode 100644 index 0000000000..2001d3154d --- /dev/null +++ b/helix-examples/DISCLAIMER @@ -0,0 +1,15 @@ +Apache Helix is an effort undergoing incubation at the Apache Software +Foundation (ASF), sponsored by the Apache Incubator PMC. + +Incubation is required of all newly accepted projects until a further review +indicates that the infrastructure, communications, and decision making process +have stabilized in a manner consistent with other successful ASF projects. + +While incubation status is not necessarily a reflection of the completeness +or stability of the code, it does indicate that the project has yet to be +fully endorsed by the ASF. 
+ +For more information about the incubation status of the Apache Helix project you +can go to the following page: + +http://incubator.apache.org/projects/helix.html diff --git a/helix-examples/LICENSE b/helix-examples/LICENSE new file mode 100644 index 0000000000..413913f132 --- /dev/null +++ b/helix-examples/LICENSE @@ -0,0 +1,273 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +For xstream: + +Copyright (c) 2003-2006, Joe Walnes +Copyright (c) 2006-2009, 2011 XStream Committers +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of +conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of +conditions and the following disclaimer in the documentation and/or other materials provided +with the distribution. + +3. Neither the name of XStream nor the names of its contributors may be used to endorse +or promote products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT +SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY +WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. + +for jline: + +Copyright (c) 2002-2006, Marc Prud'hommeaux +All rights reserved. + +Redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following +conditions are met: + +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with +the distribution. + +Neither the name of JLine nor the names of its contributors +may be used to endorse or promote products derived from this +software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO +EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, +OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED +AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. + + + diff --git a/helix-examples/NOTICE b/helix-examples/NOTICE new file mode 100644 index 0000000000..e070e15573 --- /dev/null +++ b/helix-examples/NOTICE @@ -0,0 +1,30 @@ +Apache Helix +Copyright 2012 The Apache Software Foundation + + +I. Included Software + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). +Licensed under the Apache License 2.0. + +This product includes software developed at +Codehaus (http://www.codehaus.org/) +Licensed under the BSD License. + +This product includes software developed at +jline (http://jline.sourceforge.net/ ) +Licensed under the BSD License. + +This product includes software developed at +josql (http://sourceforge.net/projects/josql/). +Licensed under the Apache License 2.0. + +This product includes software developed at +restlet (http://www.restlet.org/about/legal). +Licensed under the Apache License 2.0. + + +II. 
License Summary +- Apache License 2.0 +- BSD License diff --git a/helix-examples/pom.xml b/helix-examples/pom.xml new file mode 100644 index 0000000000..6c00af6f49 --- /dev/null +++ b/helix-examples/pom.xml @@ -0,0 +1,104 @@ + + + + + org.apache.helix + helix + 0.6.2-incubating-SNAPSHOT + + 4.0.0 + + helix-examples + jar + Apache Helix :: Helix Examples + + + + org.apache.helix*, + org.codehaus.jackson*, + org.apache.commons.cli*, + org.apache.log4j, + org.restlet*, + * + + org.apache.helix.webapp*;version="${project.version};-noimport:=true + + + + + org.apache.helix + helix-core + + + org.testng + testng + test + + + + + + ${basedir} + + DISCLAIMER + + + + + + org.codehaus.mojo + appassembler-maven-plugin + + + windows + unix + + + + org.apache.helix.examples.ExampleProcess + start-helix-participant + + + org.apache.helix.examples.Quickstart + quickstart + + + + + + org.apache.maven.plugins + maven-assembly-plugin + + + src/assemble/assembly.xml + + + + + package + + single + + + + + + + diff --git a/helix-examples/src/assemble/assembly.xml b/helix-examples/src/assemble/assembly.xml new file mode 100644 index 0000000000..c2d08a1cda --- /dev/null +++ b/helix-examples/src/assemble/assembly.xml @@ -0,0 +1,60 @@ + + + + pkg + + tar + + + + ${project.build.directory}/${project.artifactId}-pkg/bin + bin + unix + 0755 + 0755 + + + ${project.build.directory}/${project.artifactId}-pkg/repo/ + repo + 0755 + 0755 + + **/*.xml + + + + ${project.build.directory}/${project.artifactId}-pkg/conf + conf + unix + 0755 + 0755 + + + ${project.basedir} + / + + LICENSE + NOTICE + DISCLAIMER + + 0755 + + + \ No newline at end of file diff --git a/helix-core/src/main/java/org/apache/helix/examples/BootstrapHandler.java b/helix-examples/src/main/java/org/apache/helix/examples/BootstrapHandler.java similarity index 100% rename from helix-core/src/main/java/org/apache/helix/examples/BootstrapHandler.java rename to helix-examples/src/main/java/org/apache/helix/examples/BootstrapHandler.java 
diff --git a/helix-core/src/main/java/org/apache/helix/examples/BootstrapProcess.java b/helix-examples/src/main/java/org/apache/helix/examples/BootstrapProcess.java similarity index 100% rename from helix-core/src/main/java/org/apache/helix/examples/BootstrapProcess.java rename to helix-examples/src/main/java/org/apache/helix/examples/BootstrapProcess.java diff --git a/helix-core/src/main/java/org/apache/helix/examples/DummyParticipant.java b/helix-examples/src/main/java/org/apache/helix/examples/DummyParticipant.java similarity index 100% rename from helix-core/src/main/java/org/apache/helix/examples/DummyParticipant.java rename to helix-examples/src/main/java/org/apache/helix/examples/DummyParticipant.java diff --git a/helix-core/src/main/java/org/apache/helix/examples/ExampleHelper.java b/helix-examples/src/main/java/org/apache/helix/examples/ExampleHelper.java similarity index 100% rename from helix-core/src/main/java/org/apache/helix/examples/ExampleHelper.java rename to helix-examples/src/main/java/org/apache/helix/examples/ExampleHelper.java diff --git a/helix-core/src/main/java/org/apache/helix/examples/ExampleProcess.java b/helix-examples/src/main/java/org/apache/helix/examples/ExampleProcess.java similarity index 100% rename from helix-core/src/main/java/org/apache/helix/examples/ExampleProcess.java rename to helix-examples/src/main/java/org/apache/helix/examples/ExampleProcess.java diff --git a/helix-core/src/main/java/org/apache/helix/examples/IdealStateBuilderExample.java b/helix-examples/src/main/java/org/apache/helix/examples/IdealStateBuilderExample.java similarity index 100% rename from helix-core/src/main/java/org/apache/helix/examples/IdealStateBuilderExample.java rename to helix-examples/src/main/java/org/apache/helix/examples/IdealStateBuilderExample.java diff --git a/helix-core/src/main/java/org/apache/helix/examples/IdealStateExample.java b/helix-examples/src/main/java/org/apache/helix/examples/IdealStateExample.java similarity index 100% 
rename from helix-core/src/main/java/org/apache/helix/examples/IdealStateExample.java rename to helix-examples/src/main/java/org/apache/helix/examples/IdealStateExample.java diff --git a/helix-core/src/main/java/org/apache/helix/examples/LeaderStandbyStateModelFactory.java b/helix-examples/src/main/java/org/apache/helix/examples/LeaderStandbyStateModelFactory.java similarity index 100% rename from helix-core/src/main/java/org/apache/helix/examples/LeaderStandbyStateModelFactory.java rename to helix-examples/src/main/java/org/apache/helix/examples/LeaderStandbyStateModelFactory.java diff --git a/helix-core/src/main/java/org/apache/helix/examples/MasterSlaveStateModelFactory.java b/helix-examples/src/main/java/org/apache/helix/examples/MasterSlaveStateModelFactory.java similarity index 100% rename from helix-core/src/main/java/org/apache/helix/examples/MasterSlaveStateModelFactory.java rename to helix-examples/src/main/java/org/apache/helix/examples/MasterSlaveStateModelFactory.java diff --git a/helix-core/src/main/java/org/apache/helix/examples/OnlineOfflineStateModelFactory.java b/helix-examples/src/main/java/org/apache/helix/examples/OnlineOfflineStateModelFactory.java similarity index 100% rename from helix-core/src/main/java/org/apache/helix/examples/OnlineOfflineStateModelFactory.java rename to helix-examples/src/main/java/org/apache/helix/examples/OnlineOfflineStateModelFactory.java diff --git a/helix-core/src/main/java/org/apache/helix/examples/Quickstart.java b/helix-examples/src/main/java/org/apache/helix/examples/Quickstart.java similarity index 100% rename from helix-core/src/main/java/org/apache/helix/examples/Quickstart.java rename to helix-examples/src/main/java/org/apache/helix/examples/Quickstart.java diff --git a/helix-core/src/main/java/org/apache/helix/examples/package-info.java b/helix-examples/src/main/java/org/apache/helix/examples/package-info.java similarity index 100% rename from 
helix-core/src/main/java/org/apache/helix/examples/package-info.java rename to helix-examples/src/main/java/org/apache/helix/examples/package-info.java diff --git a/helix-examples/src/test/conf/testng.xml b/helix-examples/src/test/conf/testng.xml new file mode 100644 index 0000000000..37bccf3768 --- /dev/null +++ b/helix-examples/src/test/conf/testng.xml @@ -0,0 +1,27 @@ + + + + + + + + + + diff --git a/pom.xml b/pom.xml index 76474fd9b1..ee6f5733be 100644 --- a/pom.xml +++ b/pom.xml @@ -116,6 +116,7 @@ under the License. helix-core helix-admin-webapp helix-agent + helix-examples recipes diff --git a/src/site/markdown/Quickstart.md b/src/site/markdown/Quickstart.md index 4e09d24ecb..dcffc1b908 100644 --- a/src/site/markdown/Quickstart.md +++ b/src/site/markdown/Quickstart.md @@ -26,6 +26,7 @@ First, let\'s get Helix, either build it, or download. git clone https://git-wip-us.apache.org/repos/asf/incubator-helix.git cd incubator-helix + git checkout tags/helix-0.6.1-incubating mvn install package -DskipTests cd helix-core/target/helix-core-pkg/bin //This folder contains all the scripts used in following sections chmod +x * From f73b3a7ae4398d61013899ee5eb758d4f2d1143e Mon Sep 17 00:00:00 2001 From: zzhang Date: Mon, 26 Aug 2013 13:40:33 -0700 Subject: [PATCH 003/113] [HELIX-225] fix helix-examples package build error --- helix-examples/src/test/conf/testng.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helix-examples/src/test/conf/testng.xml b/helix-examples/src/test/conf/testng.xml index 37bccf3768..07080ea5ef 100644 --- a/helix-examples/src/test/conf/testng.xml +++ b/helix-examples/src/test/conf/testng.xml @@ -21,7 +21,7 @@ under the License. 
- + From 21c4fcb5157366acbf626c7c46e996e01c70ff89 Mon Sep 17 00:00:00 2001 From: zzhang Date: Mon, 26 Aug 2013 14:56:24 -0700 Subject: [PATCH 004/113] [HELIX-174] Clean up ideal state calculators, move them to the controller rebalancer package, rb=13696 --- ...ConsistentHashingMasterSlaveStrategy.java} | 12 +- .../strategy/DefaultTwoStateStrategy.java} | 489 +++++++++--------- .../strategy/EspressoRelayStrategy.java} | 4 +- .../strategy/RUSHMasterSlaveStrategy.java} | 8 +- .../strategy}/RUSHrHash.java | 2 +- .../strategy/ShufflingTwoStateStrategy.java} | 38 +- .../apache/helix/manager/zk/ZKHelixAdmin.java | 10 +- .../apache/helix/tools/YAISCalculator.java | 174 ------- .../org/apache/helix/util/RebalanceUtil.java | 4 +- .../stages/TestCompatibilityCheckStage.java | 10 +- .../stages/TestResourceComputationStage.java | 16 +- .../strategy/TestEspressoRelayStrategy.java} | 10 +- .../TestEspressoStorageClusterIdealState.java | 35 +- .../TestShufflingTwoStateStrategy.java} | 71 ++- .../integration/TestAutoIsWithEmptyMap.java | 4 +- .../apache/helix/integration/TestDriver.java | 14 +- .../helix/integration/TestExpandCluster.java | 2 +- .../integration/TestRenamePartition.java | 8 +- .../josql/TestClusterJosqlQueryProcessor.java | 8 +- .../TestDefaultMessagingService.java | 4 +- .../mbeans/TestClusterStatusMonitor.java | 13 +- .../mbeans/TestResourceMonitor.java | 6 +- 22 files changed, 377 insertions(+), 565 deletions(-) rename helix-core/src/main/java/org/apache/helix/{tools/IdealCalculatorByConsistentHashing.java => controller/strategy/ConsistentHashingMasterSlaveStrategy.java} (97%) rename helix-core/src/main/java/org/apache/helix/{tools/DefaultIdealStateCalculator.java => controller/strategy/DefaultTwoStateStrategy.java} (51%) rename helix-core/src/main/java/org/apache/helix/{tools/IdealStateCalculatorForEspressoRelay.java => controller/strategy/EspressoRelayStrategy.java} (98%) rename helix-core/src/main/java/org/apache/helix/{tools/IdealStateCalculatorByRush.java => 
controller/strategy/RUSHMasterSlaveStrategy.java} (97%) rename helix-core/src/main/java/org/apache/helix/{tools => controller/strategy}/RUSHrHash.java (99%) rename helix-core/src/main/java/org/apache/helix/{tools/IdealStateCalculatorByShuffling.java => controller/strategy/ShufflingTwoStateStrategy.java} (74%) delete mode 100644 helix-core/src/main/java/org/apache/helix/tools/YAISCalculator.java rename helix-core/src/test/java/org/apache/helix/{TestRelayIdealStateCalculator.java => controller/strategy/TestEspressoRelayStrategy.java} (89%) rename helix-core/src/test/java/org/apache/helix/{ => controller/strategy}/TestEspressoStorageClusterIdealState.java (90%) rename helix-core/src/test/java/org/apache/helix/{TestShuffledIdealState.java => controller/strategy/TestShufflingTwoStateStrategy.java} (70%) diff --git a/helix-core/src/main/java/org/apache/helix/tools/IdealCalculatorByConsistentHashing.java b/helix-core/src/main/java/org/apache/helix/controller/strategy/ConsistentHashingMasterSlaveStrategy.java similarity index 97% rename from helix-core/src/main/java/org/apache/helix/tools/IdealCalculatorByConsistentHashing.java rename to helix-core/src/main/java/org/apache/helix/controller/strategy/ConsistentHashingMasterSlaveStrategy.java index 1101a6df84..017d3d8c83 100644 --- a/helix-core/src/main/java/org/apache/helix/tools/IdealCalculatorByConsistentHashing.java +++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/ConsistentHashingMasterSlaveStrategy.java @@ -1,4 +1,4 @@ -package org.apache.helix.tools; +package org.apache.helix.controller.strategy; /* * Licensed to the Apache Software Foundation (ASF) under one @@ -32,7 +32,7 @@ import org.apache.helix.ZNRecord; import org.apache.helix.model.IdealState.IdealStateProperty; -public class IdealCalculatorByConsistentHashing { +public class ConsistentHashingMasterSlaveStrategy { /** * Interface to calculate the hash function value of a string */ @@ -456,8 +456,8 @@ public static void printHashRingStat(int[] 
hashRing) { static int[] getFnvHashArray(List strings) { int[] result = new int[strings.size()]; int i = 0; - IdealCalculatorByConsistentHashing.FnvHash hashfunc = - new IdealCalculatorByConsistentHashing.FnvHash(); + ConsistentHashingMasterSlaveStrategy.FnvHash hashfunc = + new ConsistentHashingMasterSlaveStrategy.FnvHash(); for (String s : strings) { int val = hashfunc.getHashValue(s) % 65536; if (val < 0) @@ -498,8 +498,8 @@ public static void main(String args[]) throws Exception { String dbName = "espressoDB1"; ZNRecord result = - IdealCalculatorByConsistentHashing.calculateIdealState(instanceNames, partitions, replicas, - dbName, new IdealCalculatorByConsistentHashing.FnvHash()); + ConsistentHashingMasterSlaveStrategy.calculateIdealState(instanceNames, partitions, + replicas, dbName, new ConsistentHashingMasterSlaveStrategy.FnvHash()); System.out.println("\nMaster :"); printIdealStateStats(result, "MASTER"); diff --git a/helix-core/src/main/java/org/apache/helix/tools/DefaultIdealStateCalculator.java b/helix-core/src/main/java/org/apache/helix/controller/strategy/DefaultTwoStateStrategy.java similarity index 51% rename from helix-core/src/main/java/org/apache/helix/tools/DefaultIdealStateCalculator.java rename to helix-core/src/main/java/org/apache/helix/controller/strategy/DefaultTwoStateStrategy.java index dea8e0ae52..c965748ba0 100644 --- a/helix-core/src/main/java/org/apache/helix/tools/DefaultIdealStateCalculator.java +++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/DefaultTwoStateStrategy.java @@ -1,4 +1,4 @@ -package org.apache.helix.tools; +package org.apache.helix.controller.strategy; /* * Licensed to the Apache Software Foundation (ASF) under one @@ -31,17 +31,17 @@ import org.apache.helix.model.IdealState.IdealStateProperty; /** - * DefaultIdealStateCalculator tries to optimally allocate master/slave partitions among - * espresso storage nodes. 
+ * DefaultIdealStateCalculator tries to optimally allocate two state partitions among + * storage nodes. * Given a batch of storage nodes, the partition and replication factor, the algorithm first given a * initial state * When new batches of storage nodes are added, the algorithm will calculate the new ideal state * such that the total * partition movements are minimized. */ -public class DefaultIdealStateCalculator { - static final String _MasterAssignmentMap = "MasterAssignmentMap"; - static final String _SlaveAssignmentMap = "SlaveAssignmentMap"; +public class DefaultTwoStateStrategy { + static final String _PrimaryAssignmentMap = "PrimaryAssignmentMap"; + static final String _SecondaryAssignmentMap = "SecondaryAssignmentMap"; static final String _partitions = "partitions"; static final String _replicas = "replicas"; @@ -49,49 +49,49 @@ public class DefaultIdealStateCalculator { * Calculate the initial ideal state given a batch of storage instances, the replication factor * and * number of partitions - * 1. Calculate the master assignment by random shuffling - * 2. for each storage instance, calculate the 1st slave assignment map, by another random - * shuffling - * 3. for each storage instance, calculate the i-th slave assignment map - * 4. Combine the i-th slave assignment maps together + * 1. Calculate the primary state assignment by random shuffling + * 2. for each storage instance, calculate the 1st secondary state assignment map, by another + * random shuffling + * 3. for each storage instance, calculate the i-th secondary state assignment map + * 4. Combine the i-th secondary state assignment maps together * @param instanceNames * list of storage node instances * @param partitions * number of partitions * @param replicas - * The number of replicas (slave partitions) per master partition - * @param masterStateValue - * master state value: e.g. "MASTER" or "LEADER" - * @param slaveStateValue - * slave state value: e.g. 
"SLAVE" or "STANDBY" + * The number of replicas (secondary partitions) per primary partition + * @param primaryStateValue + * primary state value: e.g. "MASTER" or "LEADER" + * @param secondaryStateValue + * secondary state value: e.g. "SLAVE" or "STANDBY" * @param resourceName * @return a ZNRecord that contain the idealstate info */ public static ZNRecord calculateIdealState(List instanceNames, int partitions, - int replicas, String resourceName, String masterStateValue, String slaveStateValue) { + int replicas, String resourceName, String primaryStateValue, String secondaryStateValue) { Collections.sort(instanceNames); if (instanceNames.size() < replicas + 1) { throw new HelixException("Number of instances must not be less than replicas + 1. " + "instanceNr:" + instanceNames.size() + ", replicas:" + replicas); } else if (partitions < instanceNames.size()) { ZNRecord idealState = - IdealStateCalculatorByShuffling.calculateIdealState(instanceNames, partitions, replicas, - resourceName, 12345, masterStateValue, slaveStateValue); + ShufflingTwoStateStrategy.calculateIdealState(instanceNames, partitions, replicas, + resourceName, 12345, primaryStateValue, secondaryStateValue); int i = 0; for (String partitionId : idealState.getMapFields().keySet()) { Map partitionAssignmentMap = idealState.getMapField(partitionId); List partitionAssignmentPriorityList = new ArrayList(); - String masterInstance = ""; + String primaryInstance = ""; for (String instanceName : partitionAssignmentMap.keySet()) { - if (partitionAssignmentMap.get(instanceName).equalsIgnoreCase(masterStateValue) - && masterInstance.equals("")) { - masterInstance = instanceName; + if (partitionAssignmentMap.get(instanceName).equalsIgnoreCase(primaryStateValue) + && primaryInstance.equals("")) { + primaryInstance = instanceName; } else { partitionAssignmentPriorityList.add(instanceName); } } Collections.shuffle(partitionAssignmentPriorityList, new Random(i++)); - partitionAssignmentPriorityList.add(0, 
masterInstance); + partitionAssignmentPriorityList.add(0, primaryInstance); idealState.setListField(partitionId, partitionAssignmentPriorityList); } return idealState; @@ -99,12 +99,12 @@ public static ZNRecord calculateIdealState(List instanceNames, int parti Map result = calculateInitialIdealState(instanceNames, partitions, replicas); - return convertToZNRecord(result, resourceName, masterStateValue, slaveStateValue); + return convertToZNRecord(result, resourceName, primaryStateValue, secondaryStateValue); } public static ZNRecord calculateIdealStateBatch(List> instanceBatches, - int partitions, int replicas, String resourceName, String masterStateValue, - String slaveStateValue) { + int partitions, int replicas, String resourceName, String primaryStateValue, + String secondaryStateValue) { Map result = calculateInitialIdealState(instanceBatches.get(0), partitions, replicas); @@ -112,18 +112,18 @@ public static ZNRecord calculateIdealStateBatch(List> instanceBatch result = calculateNextIdealState(instanceBatches.get(i), result); } - return convertToZNRecord(result, resourceName, masterStateValue, slaveStateValue); + return convertToZNRecord(result, resourceName, primaryStateValue, secondaryStateValue); } /** * Convert the internal result (stored as a Map) into ZNRecord. 
*/ public static ZNRecord convertToZNRecord(Map result, String resourceName, - String masterStateValue, String slaveStateValue) { - Map> nodeMasterAssignmentMap = - (Map>) (result.get(_MasterAssignmentMap)); - Map>> nodeSlaveAssignmentMap = - (Map>>) (result.get(_SlaveAssignmentMap)); + String primaryStateValue, String secondaryStateValue) { + Map> nodePrimaryAssignmentMap = + (Map>) (result.get(_PrimaryAssignmentMap)); + Map>> nodeSecondaryAssignmentMap = + (Map>>) (result.get(_SecondaryAssignmentMap)); int partitions = (Integer) (result.get("partitions")); @@ -131,44 +131,45 @@ public static ZNRecord convertToZNRecord(Map result, String reso idealState.setSimpleField(IdealStateProperty.NUM_PARTITIONS.toString(), String.valueOf(partitions)); - for (String instanceName : nodeMasterAssignmentMap.keySet()) { - for (Integer partitionId : nodeMasterAssignmentMap.get(instanceName)) { + for (String instanceName : nodePrimaryAssignmentMap.keySet()) { + for (Integer partitionId : nodePrimaryAssignmentMap.get(instanceName)) { String partitionName = resourceName + "_" + partitionId; if (!idealState.getMapFields().containsKey(partitionName)) { idealState.setMapField(partitionName, new TreeMap()); } - idealState.getMapField(partitionName).put(instanceName, masterStateValue); + idealState.getMapField(partitionName).put(instanceName, primaryStateValue); } } - for (String instanceName : nodeSlaveAssignmentMap.keySet()) { - Map> slaveAssignmentMap = nodeSlaveAssignmentMap.get(instanceName); + for (String instanceName : nodeSecondaryAssignmentMap.keySet()) { + Map> secondaryAssignmentMap = + nodeSecondaryAssignmentMap.get(instanceName); - for (String slaveNode : slaveAssignmentMap.keySet()) { - List slaveAssignment = slaveAssignmentMap.get(slaveNode); - for (Integer partitionId : slaveAssignment) { + for (String secondaryNode : secondaryAssignmentMap.keySet()) { + List secondaryAssignment = secondaryAssignmentMap.get(secondaryNode); + for (Integer partitionId : secondaryAssignment) 
{ String partitionName = resourceName + "_" + partitionId; - idealState.getMapField(partitionName).put(slaveNode, slaveStateValue); + idealState.getMapField(partitionName).put(secondaryNode, secondaryStateValue); } } } - // generate the priority list of instances per partition. Master should be at front and slave - // follows. + // generate the priority list of instances per partition. the primary should be at front + // and the secondaries follow. for (String partitionId : idealState.getMapFields().keySet()) { Map partitionAssignmentMap = idealState.getMapField(partitionId); List partitionAssignmentPriorityList = new ArrayList(); - String masterInstance = ""; + String primaryInstance = ""; for (String instanceName : partitionAssignmentMap.keySet()) { - if (partitionAssignmentMap.get(instanceName).equalsIgnoreCase(masterStateValue) - && masterInstance.equals("")) { - masterInstance = instanceName; + if (partitionAssignmentMap.get(instanceName).equalsIgnoreCase(primaryStateValue) + && primaryInstance.equals("")) { + primaryInstance = instanceName; } else { partitionAssignmentPriorityList.add(instanceName); } } Collections.shuffle(partitionAssignmentPriorityList); - partitionAssignmentPriorityList.add(0, masterInstance); + partitionAssignmentPriorityList.add(0, primaryInstance); idealState.setListField(partitionId, partitionAssignmentPriorityList); } assert (result.containsKey("replicas")); @@ -181,11 +182,11 @@ public static ZNRecord convertToZNRecord(Map result, String reso * Calculate the initial ideal state given a batch of storage instances, the replication factor * and * number of partitions - * 1. Calculate the master assignment by random shuffling - * 2. for each storage instance, calculate the 1st slave assignment map, by another random - * shuffling - * 3. for each storage instance, calculate the i-th slave assignment map - * 4. Combine the i-th slave assignment maps together + * 1. Calculate the primary assignment by random shuffling + * 2. 
for each storage instance, calculate the 1st secondary state assignment map, by another + * random shuffling + * 3. for each storage instance, calculate the i-th secondary state assignment map + * 4. Combine the i-th secondary state assignment maps together * @param instanceNames * list of storage node instances * @param weight @@ -193,108 +194,109 @@ public static ZNRecord convertToZNRecord(Map result, String reso * @param partitions * number of partitions * @param replicas - * The number of replicas (slave partitions) per master partition + * The number of replicas (secondary partitions) per primary partition * @return a map that contain the idealstate info */ public static Map calculateInitialIdealState(List instanceNames, int partitions, int replicas) { Random r = new Random(54321); assert (replicas <= instanceNames.size() - 1); - - ArrayList masterPartitionAssignment = new ArrayList(); + ArrayList primaryPartitionAssignment = new ArrayList(); for (int i = 0; i < partitions; i++) { - masterPartitionAssignment.add(i); + primaryPartitionAssignment.add(i); } // shuffle the partition id array - Collections.shuffle(masterPartitionAssignment, new Random(r.nextInt())); + Collections.shuffle(primaryPartitionAssignment, new Random(r.nextInt())); - // 1. Generate the random master partition assignment - // instanceName -> List of master partitions on that instance - Map> nodeMasterAssignmentMap = new TreeMap>(); - for (int i = 0; i < masterPartitionAssignment.size(); i++) { + // 1. 
Generate the random primary partition assignment + // instanceName -> List of primary partitions on that instance + Map> nodePrimaryAssignmentMap = new TreeMap>(); + for (int i = 0; i < primaryPartitionAssignment.size(); i++) { String instanceName = instanceNames.get(i % instanceNames.size()); - if (!nodeMasterAssignmentMap.containsKey(instanceName)) { - nodeMasterAssignmentMap.put(instanceName, new ArrayList()); + if (!nodePrimaryAssignmentMap.containsKey(instanceName)) { + nodePrimaryAssignmentMap.put(instanceName, new ArrayList()); } - nodeMasterAssignmentMap.get(instanceName).add(masterPartitionAssignment.get(i)); + nodePrimaryAssignmentMap.get(instanceName).add(primaryPartitionAssignment.get(i)); } - // instanceName -> slave assignment for its master partitions - // slave assignment: instanceName -> list of slave partitions on it - List>>> nodeSlaveAssignmentMapsList = + // instanceName -> secondary assignment for its primary partitions + // secondary assignment: instanceName -> list of secondary partitions on it + List>>> nodeSecondaryAssignmentMapsList = new ArrayList>>>(replicas); - Map>> firstNodeSlaveAssignmentMap = + Map>> firstNodeSecondaryAssignmentMap = new TreeMap>>(); - Map>> combinedNodeSlaveAssignmentMap = + Map>> combinedNodeSecondaryAssignmentMap = new TreeMap>>(); if (replicas > 0) { - // 2. For each node, calculate the evenly distributed slave as the first slave assignment - // We will figure out the 2nd ...replicas-th slave assignment based on the first level slave - // assignment + // 2. 
For each node, calculate the evenly distributed secondary state as the first secondary + // state assignment + // We will figure out the 2nd ...replicas-th secondary state assignment based on the first + // level secondary state assignment for (int i = 0; i < instanceNames.size(); i++) { - List slaveInstances = new ArrayList(); - ArrayList slaveAssignment = new ArrayList(); - TreeMap> slaveAssignmentMap = new TreeMap>(); + List secondaryInstances = new ArrayList(); + ArrayList secondaryAssignment = new ArrayList(); + TreeMap> secondaryAssignmentMap = + new TreeMap>(); for (int j = 0; j < instanceNames.size(); j++) { if (j != i) { - slaveInstances.add(instanceNames.get(j)); - slaveAssignmentMap.put(instanceNames.get(j), new ArrayList()); + secondaryInstances.add(instanceNames.get(j)); + secondaryAssignmentMap.put(instanceNames.get(j), new ArrayList()); } } - // Get the number of master partitions on instanceName - List masterAssignment = nodeMasterAssignmentMap.get(instanceNames.get(i)); - // do a random shuffling as in step 1, so that the first-level slave are distributed among - // rest instances + // Get the number of primary partitions on instanceName + List primaryAssignment = nodePrimaryAssignmentMap.get(instanceNames.get(i)); + // do a random shuffling as in step 1, so that the first-level secondary states are + // distributed among rest instances - for (int j = 0; j < masterAssignment.size(); j++) { - slaveAssignment.add(j); + for (int j = 0; j < primaryAssignment.size(); j++) { + secondaryAssignment.add(j); } - Collections.shuffle(slaveAssignment, new Random(r.nextInt())); - - Collections.shuffle(slaveInstances, new Random(instanceNames.get(i).hashCode())); - - // Get the slave assignment map of node instanceName - for (int j = 0; j < masterAssignment.size(); j++) { - String slaveInstanceName = - slaveInstances.get(slaveAssignment.get(j) % slaveInstances.size()); - if (!slaveAssignmentMap.containsKey(slaveInstanceName)) { - 
slaveAssignmentMap.put(slaveInstanceName, new ArrayList()); + Collections.shuffle(secondaryAssignment, new Random(r.nextInt())); + + Collections.shuffle(secondaryInstances, new Random(instanceNames.get(i).hashCode())); + // Get the secondary assignment map of node instanceName + for (int j = 0; j < primaryAssignment.size(); j++) { + String secondaryInstanceName = + secondaryInstances.get(secondaryAssignment.get(j) % secondaryInstances.size()); + if (!secondaryAssignmentMap.containsKey(secondaryInstanceName)) { + secondaryAssignmentMap.put(secondaryInstanceName, new ArrayList()); } - slaveAssignmentMap.get(slaveInstanceName).add(masterAssignment.get(j)); + secondaryAssignmentMap.get(secondaryInstanceName).add(primaryAssignment.get(j)); } - firstNodeSlaveAssignmentMap.put(instanceNames.get(i), slaveAssignmentMap); + firstNodeSecondaryAssignmentMap.put(instanceNames.get(i), secondaryAssignmentMap); } - nodeSlaveAssignmentMapsList.add(firstNodeSlaveAssignmentMap); - // From the first slave assignment map, calculate the rest slave assignment maps + nodeSecondaryAssignmentMapsList.add(firstNodeSecondaryAssignmentMap); + // From the first secondary assignment map, calculate the rest secondary assignment maps for (int replicaOrder = 1; replicaOrder < replicas; replicaOrder++) { - // calculate the next slave partition assignment map - Map>> nextNodeSlaveAssignmentMap = - calculateNextSlaveAssignemntMap(firstNodeSlaveAssignmentMap, replicaOrder); - nodeSlaveAssignmentMapsList.add(nextNodeSlaveAssignmentMap); + // calculate the next secondary partition assignment map + Map>> nextNodeSecondaryAssignmentMap = + calculateNextSecondaryAssignemntMap(firstNodeSecondaryAssignmentMap, replicaOrder); + nodeSecondaryAssignmentMapsList.add(nextNodeSecondaryAssignmentMap); } - // Combine the calculated 1...replicas-th slave assignment map together - - for (String instanceName : nodeMasterAssignmentMap.keySet()) { - Map> combinedSlaveAssignmentMap = + // Combine the calculated 
1...replicas-th secondary assignment map together + for (String instanceName : nodePrimaryAssignmentMap.keySet()) { + Map> combinedSecondaryAssignmentMap = new TreeMap>(); - for (Map>> slaveNodeAssignmentMap : nodeSlaveAssignmentMapsList) { - Map> slaveAssignmentMap = slaveNodeAssignmentMap.get(instanceName); + for (Map>> secondaryNodeAssignmentMap : nodeSecondaryAssignmentMapsList) { + Map> secondaryAssignmentMap = + secondaryNodeAssignmentMap.get(instanceName); - for (String slaveInstance : slaveAssignmentMap.keySet()) { - if (!combinedSlaveAssignmentMap.containsKey(slaveInstance)) { - combinedSlaveAssignmentMap.put(slaveInstance, new ArrayList()); + for (String secondaryInstance : secondaryAssignmentMap.keySet()) { + if (!combinedSecondaryAssignmentMap.containsKey(secondaryInstance)) { + combinedSecondaryAssignmentMap.put(secondaryInstance, new ArrayList()); } - combinedSlaveAssignmentMap.get(slaveInstance).addAll( - slaveAssignmentMap.get(slaveInstance)); + combinedSecondaryAssignmentMap.get(secondaryInstance).addAll( + secondaryAssignmentMap.get(secondaryInstance)); } } - migrateSlaveAssignMapToNewInstances(combinedSlaveAssignmentMap, new ArrayList()); - combinedNodeSlaveAssignmentMap.put(instanceName, combinedSlaveAssignmentMap); + migrateSecondaryAssignMapToNewInstances(combinedSecondaryAssignmentMap, + new ArrayList()); + combinedNodeSecondaryAssignmentMap.put(instanceName, combinedSecondaryAssignmentMap); } } /* @@ -340,40 +342,41 @@ public static Map calculateInitialIdealState(List instan * } */ Map result = new TreeMap(); - result.put("MasterAssignmentMap", nodeMasterAssignmentMap); - result.put("SlaveAssignmentMap", combinedNodeSlaveAssignmentMap); + result.put("PrimaryAssignmentMap", nodePrimaryAssignmentMap); + result.put("SecondaryAssignmentMap", combinedNodeSecondaryAssignmentMap); result.put("replicas", new Integer(replicas)); result.put("partitions", new Integer(partitions)); return result; } /** - * In the case there are more than 1 slave, we use 
the following algorithm to calculate the n-th - * slave - * assignment map based on the first level slave assignment map. - * @param firstInstanceSlaveAssignmentMap the first slave assignment map for all instances - * @param order of the slave - * @return the n-th slave assignment map for all the instances + * In the case there are more than 1 secondary, we use the following algorithm to calculate the + * n-th secondary + * assignment map based on the first level secondary assignment map. + * @param firstInstanceSecondaryAssignmentMap the first secondary assignment map for all instances + * @param order of the secondary state + * @return the n-th secondary assignment map for all the instances */ - static Map>> calculateNextSlaveAssignemntMap( - Map>> firstInstanceSlaveAssignmentMap, int replicaOrder) { + static Map>> calculateNextSecondaryAssignemntMap( + Map>> firstInstanceSecondaryAssignmentMap, int replicaOrder) { Map>> result = new TreeMap>>(); - for (String currentInstance : firstInstanceSlaveAssignmentMap.keySet()) { + for (String currentInstance : firstInstanceSecondaryAssignmentMap.keySet()) { Map> resultAssignmentMap = new TreeMap>(); result.put(currentInstance, resultAssignmentMap); } - for (String currentInstance : firstInstanceSlaveAssignmentMap.keySet()) { - Map> previousSlaveAssignmentMap = - firstInstanceSlaveAssignmentMap.get(currentInstance); + for (String currentInstance : firstInstanceSecondaryAssignmentMap.keySet()) { + Map> previousSecondaryAssignmentMap = + firstInstanceSecondaryAssignmentMap.get(currentInstance); Map> resultAssignmentMap = result.get(currentInstance); int offset = replicaOrder - 1; - for (String instance : previousSlaveAssignmentMap.keySet()) { - List otherInstances = new ArrayList(previousSlaveAssignmentMap.size() - 1); + for (String instance : previousSecondaryAssignmentMap.keySet()) { + List otherInstances = + new ArrayList(previousSecondaryAssignmentMap.size() - 1); // Obtain an array of other instances - for (String 
otherInstance : previousSlaveAssignmentMap.keySet()) { + for (String otherInstance : previousSecondaryAssignmentMap.keySet()) { otherInstances.add(otherInstance); } Collections.sort(otherInstances); @@ -387,12 +390,12 @@ static Map>> calculateNextSlaveAssignemntMap( if (instanceIndex == otherInstances.size() - 1) { instanceIndex--; } - // Since we need to evenly distribute the slaves on "instance" to other partitions, we + // Since we need to evenly distribute the secondaries on "instance" to other partitions, we // need to remove "instance" from the array. otherInstances.remove(instance); - // distribute previous slave assignment to other instances. - List previousAssignmentList = previousSlaveAssignmentMap.get(instance); + // distribute previous secondary assignment to other instances. + List previousAssignmentList = previousSecondaryAssignmentMap.get(instance); for (int i = 0; i < previousAssignmentList.size(); i++) { // Evenly distribute the previousAssignmentList to the remaining other instances @@ -411,20 +414,21 @@ static Map>> calculateNextSlaveAssignemntMap( /** * Given the current idealState, and the list of new Instances needed to be added, calculate the * new Ideal state. - * 1. Calculate how many master partitions should be moved to the new cluster of instances - * 2. assign the number of master partitions px to be moved to each previous node + * 1. Calculate how many primary partitions should be moved to the new cluster of instances + * 2. assign the number of primary partitions px to be moved to each previous node * 3. 
for each previous node, * 3.1 randomly choose px nodes, move them to temp list - * 3.2 for each px nodes, remove them from the slave assignment map; record the map position of + * 3.2 for each px nodes, remove them from the secondary assignment map; record the map position + * of * the partition; - * 3.3 calculate # of new nodes that should be put in the slave assignment map - * 3.4 even-fy the slave assignment map; + * 3.3 calculate # of new nodes that should be put in the secondary assignment map + * 3.4 even-fy the secondary assignment map; * 3.5 randomly place # of new nodes that should be placed in - * 4. from all the temp master node list get from 3.1, + * 4. from all the temp primary node list get from 3.1, * 4.1 randomly assign them to nodes in the new cluster * 5. for each node in the new cluster, - * 5.1 assemble the slave assignment map - * 5.2 even-fy the slave assignment map + * 5.1 assemble the secondary assignment map + * 5.2 even-fy the secondary assignment map * @param newInstances * list of new added storage node instances * @param weight @@ -435,122 +439,120 @@ static Map>> calculateNextSlaveAssignemntMap( */ public static Map calculateNextIdealState(List newInstances, Map previousIdealState) { - // Obtain the master / slave assignment info maps + // Obtain the primary / secondary assignment info maps Collections.sort(newInstances); - Map> previousMasterAssignmentMap = - (Map>) (previousIdealState.get("MasterAssignmentMap")); - Map>> nodeSlaveAssignmentMap = - (Map>>) (previousIdealState.get("SlaveAssignmentMap")); + Map> previousPrimaryAssignmentMap = + (Map>) (previousIdealState.get("PrimaryAssignmentMap")); + Map>> nodeSecondaryAssignmentMap = + (Map>>) (previousIdealState.get("SecondaryAssignmentMap")); List oldInstances = new ArrayList(); - for (String oldInstance : previousMasterAssignmentMap.keySet()) { + for (String oldInstance : previousPrimaryAssignmentMap.keySet()) { oldInstances.add(oldInstance); } - int previousInstanceNum = 
previousMasterAssignmentMap.size(); + int previousInstanceNum = previousPrimaryAssignmentMap.size(); int partitions = (Integer) (previousIdealState.get("partitions")); // TODO: take weight into account when calculate this - int totalMasterParitionsToMove = + int totalPrimaryParitionsToMove = partitions * (newInstances.size()) / (previousInstanceNum + newInstances.size()); - int numMastersFromEachNode = totalMasterParitionsToMove / previousInstanceNum; - int remain = totalMasterParitionsToMove % previousInstanceNum; + int numPrimariesFromEachNode = totalPrimaryParitionsToMove / previousInstanceNum; + int remain = totalPrimaryParitionsToMove % previousInstanceNum; - // Note that when remain > 0, we should make [remain] moves with (numMastersFromEachNode + 1) + // Note that when remain > 0, we should make [remain] moves with (numPrimariesFromEachNode + 1) // partitions. - // And we should first choose those (numMastersFromEachNode + 1) moves from the instances that + // And we should first choose those (numPrimariesFromEachNode + 1) moves from the instances that // has more - // master partitions - List masterPartitionListToMove = new ArrayList(); + // primary partitions + List primaryPartitionListToMove = new ArrayList(); - // For corresponding moved slave partitions, keep track of their original location; the new node - // does not + // For corresponding moved secondary partitions, keep track of their original location; the new + // node does not // need to migrate all of them. 
- Map> slavePartitionsToMoveMap = new TreeMap>(); + Map> secondaryPartitionsToMoveMap = new TreeMap>(); - // Make sure that the instances that holds more master partitions are put in front + // Make sure that the instances that holds more primary partitions are put in front List bigList = new ArrayList(), smallList = new ArrayList(); - for (String oldInstance : previousMasterAssignmentMap.keySet()) { - List masterAssignmentList = previousMasterAssignmentMap.get(oldInstance); - if (masterAssignmentList.size() > numMastersFromEachNode) { + for (String oldInstance : previousPrimaryAssignmentMap.keySet()) { + List primaryAssignmentList = previousPrimaryAssignmentMap.get(oldInstance); + if (primaryAssignmentList.size() > numPrimariesFromEachNode) { bigList.add(oldInstance); } else { smallList.add(oldInstance); } } - // "sort" the list, such that the nodes that has more master partitions moves more partitions to - // the + // "sort" the list, such that the nodes that has more primary partitions moves more partitions + // to the // new added batch of instances. 
bigList.addAll(smallList); - int totalSlaveMoves = 0; for (String oldInstance : bigList) { - List masterAssignmentList = previousMasterAssignmentMap.get(oldInstance); - int numToChoose = numMastersFromEachNode; + List primaryAssignmentList = previousPrimaryAssignmentMap.get(oldInstance); + int numToChoose = numPrimariesFromEachNode; if (remain > 0) { - numToChoose = numMastersFromEachNode + 1; + numToChoose = numPrimariesFromEachNode + 1; remain--; } - // randomly remove numToChoose of master partitions to the new added nodes - ArrayList masterPartionsMoved = new ArrayList(); - randomSelect(masterAssignmentList, masterPartionsMoved, numToChoose); - - masterPartitionListToMove.addAll(masterPartionsMoved); - Map> slaveAssignmentMap = nodeSlaveAssignmentMap.get(oldInstance); - removeFromSlaveAssignmentMap(slaveAssignmentMap, masterPartionsMoved, - slavePartitionsToMoveMap); - - // Make sure that for old instances, the slave placement map is evenly distributed - // Trace the "local slave moves", which should together contribute to most of the slave - // migrations - int movesWithinInstance = - migrateSlaveAssignMapToNewInstances(slaveAssignmentMap, newInstances); + // randomly remove numToChoose of primary partitions to the new added nodes + ArrayList primaryPartionsMoved = new ArrayList(); + randomSelect(primaryAssignmentList, primaryPartionsMoved, numToChoose); + + primaryPartitionListToMove.addAll(primaryPartionsMoved); + Map> secondaryAssignmentMap = + nodeSecondaryAssignmentMap.get(oldInstance); + removeFromSecondaryAssignmentMap(secondaryAssignmentMap, primaryPartionsMoved, + secondaryPartitionsToMoveMap); + + // Make sure that for old instances, the secondary placement map is evenly distributed + // Trace the "local secondary moves", which should together contribute to most of the + // secondary migrations + migrateSecondaryAssignMapToNewInstances(secondaryAssignmentMap, newInstances); // System.out.println("local moves: "+ movesWithinInstance); - 
totalSlaveMoves += movesWithinInstance; } // System.out.println("local slave moves total: "+ totalSlaveMoves); - // calculate the master /slave assignment for the new added nodes + // calculate the primary /secondary assignment for the new added nodes - // We already have the list of master partitions that will migrate to new batch of instances, + // We already have the list of primary partitions that will migrate to new batch of instances, // shuffle the partitions and assign them to new instances - Collections.shuffle(masterPartitionListToMove, new Random(12345)); + Collections.shuffle(primaryPartitionListToMove, new Random(12345)); for (int i = 0; i < newInstances.size(); i++) { String newInstance = newInstances.get(i); - List masterPartitionList = new ArrayList(); - for (int j = 0; j < masterPartitionListToMove.size(); j++) { + List primaryPartitionList = new ArrayList(); + for (int j = 0; j < primaryPartitionListToMove.size(); j++) { if (j % newInstances.size() == i) { - masterPartitionList.add(masterPartitionListToMove.get(j)); + primaryPartitionList.add(primaryPartitionListToMove.get(j)); } } - Map> slavePartitionMap = new TreeMap>(); + Map> secondaryPartitionMap = new TreeMap>(); for (String oldInstance : oldInstances) { - slavePartitionMap.put(oldInstance, new ArrayList()); + secondaryPartitionMap.put(oldInstance, new ArrayList()); } - // Build the slave assignment map for the new instance, based on the saved information - // about those slave partition locations in slavePartitionsToMoveMap - for (Integer x : masterPartitionList) { - for (String oldInstance : slavePartitionsToMoveMap.keySet()) { - List slaves = slavePartitionsToMoveMap.get(oldInstance); - if (slaves.contains(x)) { - slavePartitionMap.get(oldInstance).add(x); + // Build the secondary assignment map for the new instance, based on the saved information + // about those secondary partition locations in secondaryPartitionsToMoveMap + for (Integer x : primaryPartitionList) { + for (String 
oldInstance : secondaryPartitionsToMoveMap.keySet()) { + List secondaries = secondaryPartitionsToMoveMap.get(oldInstance); + if (secondaries.contains(x)) { + secondaryPartitionMap.get(oldInstance).add(x); } } } - // add entry for other new instances into the slavePartitionMap + // add entry for other new instances into the secondaryPartitionMap List otherNewInstances = new ArrayList(); for (String instance : newInstances) { if (!instance.equalsIgnoreCase(newInstance)) { otherNewInstances.add(instance); } } - // Make sure that slave partitions are evenly distributed - migrateSlaveAssignMapToNewInstances(slavePartitionMap, otherNewInstances); + // Make sure that secondary partitions are evenly distributed + migrateSecondaryAssignMapToNewInstances(secondaryPartitionMap, otherNewInstances); // Update the result in the result map. We can reuse the input previousIdealState map as // the result. - previousMasterAssignmentMap.put(newInstance, masterPartitionList); - nodeSlaveAssignmentMap.put(newInstance, slavePartitionMap); + previousPrimaryAssignmentMap.put(newInstance, primaryPartitionList); + nodeSecondaryAssignmentMap.put(newInstance, secondaryPartitionMap); } /* @@ -597,27 +599,28 @@ public static Map calculateNextIdealState(List newInstan } public ZNRecord calculateNextIdealState(List newInstances, - Map previousIdealState, String resourceName, String masterStateValue, - String slaveStateValue) { + Map previousIdealState, String resourceName, String primaryStateValue, + String secondaryStateValue) { return convertToZNRecord(calculateNextIdealState(newInstances, previousIdealState), - resourceName, masterStateValue, slaveStateValue); + resourceName, primaryStateValue, secondaryStateValue); } /** - * Given the list of master partition that will be migrated away from the storage instance, - * Remove their entries from the local instance slave assignment map. 
- * @param slaveAssignmentMap the local instance slave assignment map - * @param masterPartionsMoved the list of master partition ids that will be migrated away - * @param removedAssignmentMap keep track of the removed slave assignment info. The info can be + * Given the list of primary partitions that will be migrated away from the storage instance, + * Remove their entries from the local instance secondary assignment map. + * @param secondaryAssignmentMap the local instance secondary assignment map + * @param primaryPartionsMoved the list of primary partition ids that will be migrated away + * @param removedAssignmentMap keep track of the removed secondary assignment info. The info can + * be * used by new added storage nodes. */ - static void removeFromSlaveAssignmentMap(Map> slaveAssignmentMap, - List masterPartionsMoved, Map> removedAssignmentMap) { - for (String instanceName : slaveAssignmentMap.keySet()) { - List slaveAssignment = slaveAssignmentMap.get(instanceName); - for (Integer partitionId : masterPartionsMoved) { - if (slaveAssignment.contains(partitionId)) { - slaveAssignment.remove(partitionId); + static void removeFromSecondaryAssignmentMap(Map> secondaryAssignmentMap, + List primaryPartionsMoved, Map> removedAssignmentMap) { + for (String instanceName : secondaryAssignmentMap.keySet()) { + List secondaryAssignment = secondaryAssignmentMap.get(instanceName); + for (Integer partitionId : primaryPartionsMoved) { + if (secondaryAssignment.contains(partitionId)) { + secondaryAssignment.remove(partitionId); if (!removedAssignmentMap.containsKey(instanceName)) { removedAssignmentMap.put(instanceName, new ArrayList()); } @@ -629,38 +632,40 @@ static void removeFromSlaveAssignmentMap(Map> slaveAssignm /** * Since some new storage instances are added, each existing storage instance should migrate some - * slave partitions to the new added instances. 
- * The algorithm keeps moving one partition to from the instance that hosts most slave partitions + * secondary partitions to the new added instances. + * The algorithm keeps moving one partition to from the instance that hosts most secondary + * partitions * to the instance that hosts least number of partitions, until max-min <= 1. - * In this way we can guarantee that all instances hosts almost same number of slave partitions, - * also - * slave partitions are evenly distributed. - * @param slaveAssignmentMap the local instance slave assignment map - * @param masterPartionsMoved the list of master partition ids that will be migrated away - * @param removedAssignmentMap keep track of the removed slave assignment info. The info can be + * In this way we can guarantee that all instances hosts almost same number of secondary + * partitions, also + * secondary partitions are evenly distributed. + * @param secondaryAssignmentMap the local instance secondary assignment map + * @param primaryPartionsMoved the list of primary partition ids that will be migrated away + * @param removedAssignmentMap keep track of the removed secondary assignment info. The info can + * be * used by new added storage nodes. 
*/ - static int migrateSlaveAssignMapToNewInstances(Map> nodeSlaveAssignmentMap, - List newInstances) { + static int migrateSecondaryAssignMapToNewInstances( + Map> secondaryAssignmentMap, List newInstances) { int moves = 0; boolean done = false; for (String newInstance : newInstances) { - nodeSlaveAssignmentMap.put(newInstance, new ArrayList()); + secondaryAssignmentMap.put(newInstance, new ArrayList()); } while (!done) { List maxAssignment = null, minAssignment = null; int minCount = Integer.MAX_VALUE, maxCount = Integer.MIN_VALUE; String minInstance = ""; - for (String instanceName : nodeSlaveAssignmentMap.keySet()) { - List slaveAssignment = nodeSlaveAssignmentMap.get(instanceName); - if (minCount > slaveAssignment.size()) { - minCount = slaveAssignment.size(); - minAssignment = slaveAssignment; + for (String instanceName : secondaryAssignmentMap.keySet()) { + List secondaryAssignment = secondaryAssignmentMap.get(instanceName); + if (minCount > secondaryAssignment.size()) { + minCount = secondaryAssignment.size(); + minAssignment = secondaryAssignment; minInstance = instanceName; } - if (maxCount < slaveAssignment.size()) { - maxCount = slaveAssignment.size(); - maxAssignment = slaveAssignment; + if (maxCount < secondaryAssignment.size()) { + maxCount = secondaryAssignment.size(); + maxAssignment = secondaryAssignment; } } if (maxCount - minCount <= 1) { @@ -688,7 +693,7 @@ static int migrateSlaveAssignMapToNewInstances(Map> nodeSl /** * Randomly select a number of elements from original list and put them in the selectedList - * The algorithm is used to select master partitions to be migrated when new instances are added. + * The algorithm is used to select primary partitions to be migrated when new instances are added. 
* @param originalList the original list * @param selectedList the list that contain selected elements * @param num number of elements to be selected @@ -716,7 +721,7 @@ public static void main(String args[]) { } int partitions = 48 * 3, replicas = 3; Map resultOriginal = - DefaultIdealStateCalculator.calculateInitialIdealState(instanceNames, partitions, replicas); + DefaultTwoStateStrategy.calculateInitialIdealState(instanceNames, partitions, replicas); } } diff --git a/helix-core/src/main/java/org/apache/helix/tools/IdealStateCalculatorForEspressoRelay.java b/helix-core/src/main/java/org/apache/helix/controller/strategy/EspressoRelayStrategy.java similarity index 98% rename from helix-core/src/main/java/org/apache/helix/tools/IdealStateCalculatorForEspressoRelay.java rename to helix-core/src/main/java/org/apache/helix/controller/strategy/EspressoRelayStrategy.java index 25ae625fe8..4e88499c2a 100644 --- a/helix-core/src/main/java/org/apache/helix/tools/IdealStateCalculatorForEspressoRelay.java +++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/EspressoRelayStrategy.java @@ -1,4 +1,4 @@ -package org.apache.helix.tools; +package org.apache.helix.controller.strategy; /* * Licensed to the Apache Software Foundation (ASF) under one @@ -29,7 +29,7 @@ import org.apache.helix.HelixException; import org.apache.helix.model.IdealState; -public class IdealStateCalculatorForEspressoRelay { +public class EspressoRelayStrategy { public static IdealState calculateRelayIdealState(List partitions, List instances, String resultRecordName, int replica, String firstValue, String restValue, String stateModelName) { diff --git a/helix-core/src/main/java/org/apache/helix/tools/IdealStateCalculatorByRush.java b/helix-core/src/main/java/org/apache/helix/controller/strategy/RUSHMasterSlaveStrategy.java similarity index 97% rename from helix-core/src/main/java/org/apache/helix/tools/IdealStateCalculatorByRush.java rename to 
helix-core/src/main/java/org/apache/helix/controller/strategy/RUSHMasterSlaveStrategy.java index 7677b42b58..39561ae50b 100644 --- a/helix-core/src/main/java/org/apache/helix/tools/IdealStateCalculatorByRush.java +++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/RUSHMasterSlaveStrategy.java @@ -1,4 +1,4 @@ -package org.apache.helix.tools; +package org.apache.helix.controller.strategy; /* * Licensed to the Apache Software Foundation (ASF) under one @@ -29,7 +29,7 @@ import org.apache.helix.ZNRecord; import org.apache.helix.model.IdealState.IdealStateProperty; -public class IdealStateCalculatorByRush { +public class RUSHMasterSlaveStrategy { /** * Build the config map for RUSH algorithm. The input of RUSH algorithm groups * nodes into "cluster"s, and different clusters can be assigned with @@ -263,7 +263,7 @@ public static void main(String args[]) throws Exception { List weights1 = new ArrayList(); weights1.add(1); ZNRecord result = - IdealStateCalculatorByRush.calculateIdealState(instanceCluster1, weights1, partitions, + RUSHMasterSlaveStrategy.calculateIdealState(instanceCluster1, weights1, partitions, replicas, resourceName); printIdealStateStats(result); @@ -275,7 +275,7 @@ public static void main(String args[]) throws Exception { instanceCluster1.add(instanceNames2); weights1.add(1); ZNRecord result2 = - IdealStateCalculatorByRush.calculateIdealState(instanceCluster1, weights1, partitions, + RUSHMasterSlaveStrategy.calculateIdealState(instanceCluster1, weights1, partitions, replicas, resourceName); printDiff(result, result2); diff --git a/helix-core/src/main/java/org/apache/helix/tools/RUSHrHash.java b/helix-core/src/main/java/org/apache/helix/controller/strategy/RUSHrHash.java similarity index 99% rename from helix-core/src/main/java/org/apache/helix/tools/RUSHrHash.java rename to helix-core/src/main/java/org/apache/helix/controller/strategy/RUSHrHash.java index fb9c594e4e..e3972ff98e 100644 --- 
a/helix-core/src/main/java/org/apache/helix/tools/RUSHrHash.java +++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/RUSHrHash.java @@ -1,4 +1,4 @@ -package org.apache.helix.tools; +package org.apache.helix.controller.strategy; /* * Licensed to the Apache Software Foundation (ASF) under one diff --git a/helix-core/src/main/java/org/apache/helix/tools/IdealStateCalculatorByShuffling.java b/helix-core/src/main/java/org/apache/helix/controller/strategy/ShufflingTwoStateStrategy.java similarity index 74% rename from helix-core/src/main/java/org/apache/helix/tools/IdealStateCalculatorByShuffling.java rename to helix-core/src/main/java/org/apache/helix/controller/strategy/ShufflingTwoStateStrategy.java index d4764eff57..7b4ce739f7 100644 --- a/helix-core/src/main/java/org/apache/helix/tools/IdealStateCalculatorByShuffling.java +++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/ShufflingTwoStateStrategy.java @@ -1,4 +1,4 @@ -package org.apache.helix.tools; +package org.apache.helix.controller.strategy; /* * Licensed to the Apache Software Foundation (ASF) under one @@ -31,7 +31,8 @@ /* * Ideal state calculator for the cluster manager V1. The ideal state is - * calculated by randomly assign master partitions to storage nodes. + * calculated by randomly assign primary partitions to storage nodes. This is intended for a + * two-state scheme where one is primary and the other is secondary. * * Note that the following code is a native strategy and is for cluster manager V1 only. We will * use the other algorithm to calculate the ideal state in future milestones. @@ -39,19 +40,30 @@ * * */ -public class IdealStateCalculatorByShuffling { +public class ShufflingTwoStateStrategy { /* * Given the number of nodes, partitions and replica number, calculate the - * ideal state in the following manner: For the master partition assignment, + * ideal state in the following manner: For the primary partition assignment, * 1. 
construct Arraylist partitionList, with partitionList[i] = i; 2. Shuffle * the partitions array 3. Scan the shuffled array, then assign * partitionList[i] to node (i % nodes) * for the slave partitions, simply put them in the node after the node that + * <<<<<<< + * HEAD:helix-core/src/main/java/org/apache/helix/controller/strategy/ShufflingTwoStateStrategy + * .java + * contains the primary partition. + * ======= * contains the master partition. + * >>>>>>> + * master:helix-core/src/main/java/org/apache/helix/tools/IdealStateCalculatorByShuffling.java * The result of the method is a ZNRecord, which contains a list of maps; each - * map is from the name of nodes to either "MASTER" or "SLAVE". + * map is from the name of nodes to either state name ("MASTER" or "SLAVE" for + * MasterSlave). */ + /** + * Calculate an ideal state for a MasterSlave configuration + */ public static ZNRecord calculateIdealState(List instanceNames, int partitions, int replicas, String resourceName, long randomSeed) { return calculateIdealState(instanceNames, partitions, replicas, resourceName, randomSeed, @@ -59,9 +71,9 @@ public static ZNRecord calculateIdealState(List instanceNames, int parti } public static ZNRecord calculateIdealState(List instanceNames, int partitions, - int replicas, String resourceName, long randomSeed, String masterValue, String slaveValue) { + int replicas, String resourceName, long randomSeed, String primaryValue, String secondaryValue) { if (instanceNames.size() <= replicas) { - throw new IllegalArgumentException("Replicas must be less than number of storage nodes"); + throw new IllegalArgumentException("Replicas must be less than number of nodes"); } Collections.sort(instanceNames); @@ -79,18 +91,18 @@ public static ZNRecord calculateIdealState(List instanceNames, int parti for (int i = 0; i < partitionList.size(); i++) { int partitionId = partitionList.get(i); Map partitionAssignment = new TreeMap(); - int masterNode = i % instanceNames.size(); - // the first 
in the list is the node that contains the master - partitionAssignment.put(instanceNames.get(masterNode), masterValue); + int primaryNode = i % instanceNames.size(); + // the first in the list is the node that contains the primary + partitionAssignment.put(instanceNames.get(primaryNode), primaryValue); - // for the jth replica, we put it on (masterNode + j) % nodes-th + // for the jth replica, we put it on (primaryNode + j) % nodes-th // node for (int j = 1; j <= replicas; j++) { - int index = (masterNode + j * partitionList.size()) % instanceNames.size(); + int index = (primaryNode + j * partitionList.size()) % instanceNames.size(); while (partitionAssignment.keySet().contains(instanceNames.get(index))) { index = (index + 1) % instanceNames.size(); } - partitionAssignment.put(instanceNames.get(index), slaveValue); + partitionAssignment.put(instanceNames.get(index), secondaryValue); } String partitionName = resourceName + "_" + partitionId; result.setMapField(partitionName, partitionAssignment); diff --git a/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixAdmin.java b/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixAdmin.java index 08a8208103..5da5b275d8 100644 --- a/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixAdmin.java +++ b/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixAdmin.java @@ -33,7 +33,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.TreeMap; import java.util.UUID; import java.util.concurrent.TimeUnit; @@ -54,10 +53,10 @@ import org.apache.helix.ZNRecord; import org.apache.helix.alerts.AlertsHolder; import org.apache.helix.alerts.StatsHolder; +import org.apache.helix.controller.strategy.DefaultTwoStateStrategy; import org.apache.helix.model.Alerts; import org.apache.helix.model.ClusterConstraints; import org.apache.helix.model.ClusterConstraints.ConstraintType; -import org.apache.helix.model.ConfigScope; import org.apache.helix.model.ConstraintItem; import 
org.apache.helix.model.CurrentState; import org.apache.helix.model.ExternalView; @@ -74,7 +73,6 @@ import org.apache.helix.model.PauseSignal; import org.apache.helix.model.PersistentStats; import org.apache.helix.model.StateModelDefinition; -import org.apache.helix.tools.DefaultIdealStateCalculator; import org.apache.helix.util.HelixUtil; import org.apache.helix.util.RebalanceUtil; import org.apache.log4j.Logger; @@ -1025,7 +1023,7 @@ void rebalance(String clusterName, String resourceName, int replica, String keyP } if (idealState.getRebalanceMode() != RebalanceMode.FULL_AUTO) { ZNRecord newIdealState = - DefaultIdealStateCalculator.calculateIdealState(instanceNames, partitions, replica, + DefaultTwoStateStrategy.calculateIdealState(instanceNames, partitions, replica, keyPrefix, masterStateValue, slaveStateValue); // for now keep mapField in SEMI_AUTO mode and remove listField in CUSTOMIZED mode @@ -1156,7 +1154,7 @@ public void rebalance(String clusterName, IdealState currentIdealState, List balancedRecord = - DefaultIdealStateCalculator.calculateNextIdealState(instanceNames, previousIdealState); + DefaultTwoStateStrategy.calculateNextIdealState(instanceNames, previousIdealState); StateModelDefinition stateModDef = this.getStateModelDef(clusterName, currentIdealState.getStateModelDefRef()); @@ -1167,7 +1165,7 @@ public void rebalance(String clusterName, IdealState currentIdealState, List partitionSet = new HashSet(); partitionSet.addAll(newIdealStateRecord.getMapFields().keySet()); diff --git a/helix-core/src/main/java/org/apache/helix/tools/YAISCalculator.java b/helix-core/src/main/java/org/apache/helix/tools/YAISCalculator.java deleted file mode 100644 index 4292baaf52..0000000000 --- a/helix-core/src/main/java/org/apache/helix/tools/YAISCalculator.java +++ /dev/null @@ -1,174 +0,0 @@ -package org.apache.helix.tools; - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.LinkedList; -import java.util.List; -import java.util.Random; - -public class YAISCalculator { - static class Assignment { - private final int numNodes; - private final int replication; - Partition[] partitions; - Node[] nodes; - - public Assignment(int numNodes, int numPartitions, int replication) { - this.numNodes = numNodes; - this.replication = replication; - partitions = new Partition[numPartitions]; - for (int i = 0; i < numPartitions; i++) { - partitions[i] = new Partition(i, replication); - } - nodes = new Node[numNodes]; - for (int i = 0; i < numNodes; i++) { - nodes[i] = new Node(replication); - } - } - - public void assign(int partitionId, int replicaId, int nodeId) { - System.out.println("Assigning (" + partitionId + "," + replicaId + ") to " + nodeId); - partitions[partitionId].nodeIds[replicaId] = nodeId; - nodes[nodeId].partitionLists.get(replicaId).push(partitionId); - } - - public void unassign(int partitionId, int replicaId) { - - } - - Integer[] getPartitionsPerNode(int nodeId, int replicaId) { - List partitionsList = new ArrayList(); - for (Partition p : partitions) { - if (p.nodeIds[replicaId] == nodeId) { - partitionsList.add(p.partionId); - } - } - Integer[] array = new 
Integer[partitionsList.size()]; - partitionsList.toArray(array); - return array; - } - - public void printPerNode() { - for (int nodeId = 0; nodeId < numNodes; nodeId++) { - for (int r = 0; r < replication; r++) { - StringBuilder sb = new StringBuilder(); - sb.append("(").append(nodeId).append(",").append(r).append("):\t"); - Node node = nodes[nodeId]; - LinkedList linkedList = node.partitionLists.get(r); - for (int partitionId : linkedList) { - sb.append(partitionId).append(","); - } - System.out.println(sb.toString()); - } - - } - } - } - - static class Partition { - - final int partionId; - - public Partition(int partionId, int replication) { - this.partionId = partionId; - nodeIds = new int[replication]; - Arrays.fill(nodeIds, -1); - } - - int nodeIds[]; - } - - static class Node { - private final int replication; - ArrayList> partitionLists; - - public Node(int replication) { - this.replication = replication; - partitionLists = new ArrayList>(replication); - for (int i = 0; i < replication; i++) { - partitionLists.add(new LinkedList()); - } - } - - } - - public static void main(String[] args) { - doAssignment(new int[] { - 5 - }, 120, 3); - } - - private static void doAssignment(int[] nodes, int partitions, int replication) { - int N = nodes[0]; - int totalNodes = 0; - for (int temp : nodes) { - totalNodes += temp; - } - Assignment assignment = new Assignment(totalNodes, partitions, replication); - int nodeId = 0; - for (int i = 0; i < partitions; i++) { - assignment.assign(i, 0, nodeId); - nodeId = (nodeId + 1) % N; - } - Random random = new Random(); - for (int r = 1; r < replication; r++) { - for (int id = 0; id < N; id++) { - Integer[] partitionsPerNode = assignment.getPartitionsPerNode(id, 0); - boolean[] used = new boolean[partitionsPerNode.length]; - Arrays.fill(used, false); - System.out.println(id + "-" + partitionsPerNode.length); - nodeId = (id + r) % N; - int count = partitionsPerNode.length; - boolean done = false; - do { - if (nodeId != id) { - 
int nextInt = random.nextInt(count); - int temp = 0; - for (int b = 0; b < used.length; b++) { - if (!used[b] && temp == nextInt) { - assignment.assign(partitionsPerNode[b], r, nodeId); - used[b] = true; - break; - } - } - } - nodeId = (nodeId + 1) % N; - } while (count > 0); - - } - } - if (nodes.length > 1) { - int prevNodeCount = nodes[0]; - for (int i = 1; i < nodes.length; i++) { - int newNodeCount = prevNodeCount + nodes[i]; - int masterPartitionsToMove = - (int) ((partitions * 1.0 / prevNodeCount - partitions * 1.0 / newNodeCount) * 1 * prevNodeCount); - while (masterPartitionsToMove > 0) { - - } - - } - } - assignment.printPerNode(); - } - -} diff --git a/helix-core/src/main/java/org/apache/helix/util/RebalanceUtil.java b/helix-core/src/main/java/org/apache/helix/util/RebalanceUtil.java index 3f8c406397..273adc323b 100644 --- a/helix-core/src/main/java/org/apache/helix/util/RebalanceUtil.java +++ b/helix-core/src/main/java/org/apache/helix/util/RebalanceUtil.java @@ -90,8 +90,8 @@ public static Map buildInternalIdealState(IdealState state) { } Map result = new TreeMap(); - result.put("MasterAssignmentMap", nodeMasterAssignmentMap); - result.put("SlaveAssignmentMap", combinedNodeSlaveAssignmentMap); + result.put("PrimaryAssignmentMap", nodeMasterAssignmentMap); + result.put("SecondaryAssignmentMap", combinedNodeSlaveAssignmentMap); result.put("replicas", Integer.parseInt(state.getReplicas())); result.put("partitions", new Integer(state.getRecord().getListFields().size())); result.put("reversePartitionIndex", reversePartitionIndex); diff --git a/helix-core/src/test/java/org/apache/helix/controller/stages/TestCompatibilityCheckStage.java b/helix-core/src/test/java/org/apache/helix/controller/stages/TestCompatibilityCheckStage.java index 391d1af881..0b97e205f5 100644 --- a/helix-core/src/test/java/org/apache/helix/controller/stages/TestCompatibilityCheckStage.java +++ b/helix-core/src/test/java/org/apache/helix/controller/stages/TestCompatibilityCheckStage.java 
@@ -23,15 +23,13 @@ import java.util.List; import org.apache.helix.Mocks; -import org.apache.helix.ZNRecord; import org.apache.helix.PropertyKey.Builder; +import org.apache.helix.ZNRecord; import org.apache.helix.controller.pipeline.StageContext; -import org.apache.helix.controller.stages.CompatibilityCheckStage; -import org.apache.helix.controller.stages.ReadClusterDataStage; +import org.apache.helix.controller.strategy.DefaultTwoStateStrategy; import org.apache.helix.model.IdealState; import org.apache.helix.model.LiveInstance; import org.apache.helix.model.LiveInstance.LiveInstanceProperty; -import org.apache.helix.tools.DefaultIdealStateCalculator; import org.testng.Assert; import org.testng.annotations.Test; @@ -50,8 +48,8 @@ private void prepare(String controllerVersion, String participantVersion, // set ideal state String resourceName = "testResource"; ZNRecord record = - DefaultIdealStateCalculator.calculateIdealState(instances, partitions, replicas, - resourceName, "MASTER", "SLAVE"); + DefaultTwoStateStrategy.calculateIdealState(instances, partitions, replicas, resourceName, + "MASTER", "SLAVE"); IdealState idealState = new IdealState(record); idealState.setStateModelDefRef("MasterSlave"); diff --git a/helix-core/src/test/java/org/apache/helix/controller/stages/TestResourceComputationStage.java b/helix-core/src/test/java/org/apache/helix/controller/stages/TestResourceComputationStage.java index dcb955c3e3..6febe934d4 100644 --- a/helix-core/src/test/java/org/apache/helix/controller/stages/TestResourceComputationStage.java +++ b/helix-core/src/test/java/org/apache/helix/controller/stages/TestResourceComputationStage.java @@ -25,19 +25,15 @@ import java.util.UUID; import org.apache.helix.HelixDataAccessor; -import org.apache.helix.ZNRecord; import org.apache.helix.PropertyKey.Builder; +import org.apache.helix.ZNRecord; import org.apache.helix.controller.pipeline.StageContext; -import org.apache.helix.controller.stages.AttributeName; -import 
org.apache.helix.controller.stages.ClusterEvent; -import org.apache.helix.controller.stages.ReadClusterDataStage; -import org.apache.helix.controller.stages.ResourceComputationStage; +import org.apache.helix.controller.strategy.DefaultTwoStateStrategy; import org.apache.helix.model.CurrentState; import org.apache.helix.model.IdealState; +import org.apache.helix.model.IdealState.RebalanceMode; import org.apache.helix.model.LiveInstance; import org.apache.helix.model.Resource; -import org.apache.helix.model.IdealState.RebalanceMode; -import org.apache.helix.tools.DefaultIdealStateCalculator; import org.testng.AssertJUnit; import org.testng.annotations.Test; @@ -57,8 +53,8 @@ public void testSimple() throws Exception { int replicas = 1; String resourceName = "testResource"; ZNRecord record = - DefaultIdealStateCalculator.calculateIdealState(instances, partitions, replicas, - resourceName, "MASTER", "SLAVE"); + DefaultTwoStateStrategy.calculateIdealState(instances, partitions, replicas, resourceName, + "MASTER", "SLAVE"); IdealState idealState = new IdealState(record); idealState.setStateModelDefRef("MasterSlave"); @@ -122,7 +118,7 @@ public void testMultipleResourcesWithSomeDropped() throws Exception { int replicas = 1; String resourceName = resources[i]; ZNRecord record = - DefaultIdealStateCalculator.calculateIdealState(instances, partitions, replicas, + DefaultTwoStateStrategy.calculateIdealState(instances, partitions, replicas, resourceName, "MASTER", "SLAVE"); IdealState idealState = new IdealState(record); idealState.setStateModelDefRef("MasterSlave"); diff --git a/helix-core/src/test/java/org/apache/helix/TestRelayIdealStateCalculator.java b/helix-core/src/test/java/org/apache/helix/controller/strategy/TestEspressoRelayStrategy.java similarity index 89% rename from helix-core/src/test/java/org/apache/helix/TestRelayIdealStateCalculator.java rename to helix-core/src/test/java/org/apache/helix/controller/strategy/TestEspressoRelayStrategy.java index 
ca58659ed6..ee0d14351f 100644 --- a/helix-core/src/test/java/org/apache/helix/TestRelayIdealStateCalculator.java +++ b/helix-core/src/test/java/org/apache/helix/controller/strategy/TestEspressoRelayStrategy.java @@ -1,4 +1,4 @@ -package org.apache.helix; +package org.apache.helix.controller.strategy; /* * Licensed to the Apache Software Foundation (ASF) under one @@ -20,17 +20,15 @@ */ import java.util.ArrayList; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.TreeMap; import org.apache.helix.model.IdealState; -import org.apache.helix.tools.IdealStateCalculatorForEspressoRelay; import org.testng.Assert; import org.testng.annotations.Test; -public class TestRelayIdealStateCalculator { +public class TestEspressoRelayStrategy { @Test() public void testEspressoStorageClusterIdealState() throws Exception { testEspressoStorageClusterIdealState(15, 9, 3); @@ -52,8 +50,8 @@ public void testEspressoStorageClusterIdealState(int partitions, int nodes, int } IdealState idealstate = - IdealStateCalculatorForEspressoRelay.calculateRelayIdealState(storageNodes, relays, "TEST", - replica, "Leader", "Standby", "LeaderStandby"); + EspressoRelayStrategy.calculateRelayIdealState(storageNodes, relays, "TEST", replica, + "Leader", "Standby", "LeaderStandby"); Assert.assertEquals(idealstate.getRecord().getListFields().size(), idealstate.getRecord() .getMapFields().size()); diff --git a/helix-core/src/test/java/org/apache/helix/TestEspressoStorageClusterIdealState.java b/helix-core/src/test/java/org/apache/helix/controller/strategy/TestEspressoStorageClusterIdealState.java similarity index 90% rename from helix-core/src/test/java/org/apache/helix/TestEspressoStorageClusterIdealState.java rename to helix-core/src/test/java/org/apache/helix/controller/strategy/TestEspressoStorageClusterIdealState.java index 9b249ece51..dbb870ec6b 100644 --- a/helix-core/src/test/java/org/apache/helix/TestEspressoStorageClusterIdealState.java +++ 
b/helix-core/src/test/java/org/apache/helix/controller/strategy/TestEspressoStorageClusterIdealState.java @@ -1,4 +1,4 @@ -package org.apache.helix; +package org.apache.helix.controller.strategy; /* * Licensed to the Apache Software Foundation (ASF) under one @@ -29,15 +29,13 @@ import org.apache.helix.ZNRecord; import org.apache.helix.model.IdealState; -import org.apache.helix.tools.ClusterSetup; -import org.apache.helix.tools.DefaultIdealStateCalculator; import org.apache.helix.util.RebalanceUtil; import org.testng.Assert; import org.testng.AssertJUnit; import org.testng.annotations.Test; public class TestEspressoStorageClusterIdealState { - @Test() + @Test public void testEspressoStorageClusterIdealState() throws Exception { List instanceNames = new ArrayList(); for (int i = 0; i < 5; i++) { @@ -45,7 +43,7 @@ public void testEspressoStorageClusterIdealState() throws Exception { } int partitions = 8, replicas = 0; Map result0 = - DefaultIdealStateCalculator.calculateInitialIdealState(instanceNames, partitions, replicas); + DefaultTwoStateStrategy.calculateInitialIdealState(instanceNames, partitions, replicas); Verify(result0, partitions, replicas); partitions = 8192; @@ -56,27 +54,27 @@ public void testEspressoStorageClusterIdealState() throws Exception { instanceNames.add("localhost:123" + i); } Map resultOriginal = - DefaultIdealStateCalculator.calculateInitialIdealState(instanceNames, partitions, replicas); + DefaultTwoStateStrategy.calculateInitialIdealState(instanceNames, partitions, replicas); Verify(resultOriginal, partitions, replicas); printStat(resultOriginal); Map result1 = - DefaultIdealStateCalculator.calculateInitialIdealState(instanceNames, partitions, replicas); + DefaultTwoStateStrategy.calculateInitialIdealState(instanceNames, partitions, replicas); List instanceNames2 = new ArrayList(); for (int i = 30; i < 35; i++) { instanceNames2.add("localhost:123" + i); } - DefaultIdealStateCalculator.calculateNextIdealState(instanceNames2, result1); + 
DefaultTwoStateStrategy.calculateNextIdealState(instanceNames2, result1); List instanceNames3 = new ArrayList(); for (int i = 35; i < 40; i++) { instanceNames3.add("localhost:123" + i); } - DefaultIdealStateCalculator.calculateNextIdealState(instanceNames3, result1); + DefaultTwoStateStrategy.calculateNextIdealState(instanceNames3, result1); Double masterKeepRatio = 0.0, slaveKeepRatio = 0.0; Verify(result1, partitions, replicas); double[] result = compareResult(resultOriginal, result1); @@ -97,21 +95,20 @@ public void testRebalance2() { } Map resultOriginal = - DefaultIdealStateCalculator.calculateInitialIdealState(instanceNames, partitions, replicas); + DefaultTwoStateStrategy.calculateInitialIdealState(instanceNames, partitions, replicas); ZNRecord idealState1 = - DefaultIdealStateCalculator.convertToZNRecord(resultOriginal, "TestDB", "MASTER", "SLAVE"); + DefaultTwoStateStrategy.convertToZNRecord(resultOriginal, "TestDB", "MASTER", "SLAVE"); Map result1 = RebalanceUtil.buildInternalIdealState(new IdealState(idealState1)); - List instanceNames2 = new ArrayList(); for (int i = 30; i < 35; i++) { instanceNames2.add("localhost:123" + i); } Map result2 = - DefaultIdealStateCalculator.calculateNextIdealState(instanceNames2, result1); + DefaultTwoStateStrategy.calculateNextIdealState(instanceNames2, result1); Verify(resultOriginal, partitions, replicas); Verify(result2, partitions, replicas); @@ -125,9 +122,9 @@ public void testRebalance2() { public static void Verify(Map result, int partitions, int replicas) { Map> masterAssignmentMap = - (Map>) (result.get("MasterAssignmentMap")); + (Map>) (result.get("PrimaryAssignmentMap")); Map>> nodeSlaveAssignmentMap = - (Map>>) (result.get("SlaveAssignmentMap")); + (Map>>) (result.get("SecondaryAssignmentMap")); AssertJUnit.assertTrue(partitions == (Integer) (result.get("partitions"))); @@ -224,14 +221,14 @@ public void printStat(Map result) { public static double[] compareResult(Map result1, Map result2) { double[] result = 
new double[2]; Map> masterAssignmentMap1 = - (Map>) (result1.get("MasterAssignmentMap")); + (Map>) (result1.get("PrimaryAssignmentMap")); Map>> nodeSlaveAssignmentMap1 = - (Map>>) (result1.get("SlaveAssignmentMap")); + (Map>>) (result1.get("SecondaryAssignmentMap")); Map> masterAssignmentMap2 = - (Map>) (result2.get("MasterAssignmentMap")); + (Map>) (result2.get("PrimaryAssignmentMap")); Map>> nodeSlaveAssignmentMap2 = - (Map>>) (result2.get("SlaveAssignmentMap")); + (Map>>) (result2.get("SecondaryAssignmentMap")); int commonMasters = 0; int commonSlaves = 0; diff --git a/helix-core/src/test/java/org/apache/helix/TestShuffledIdealState.java b/helix-core/src/test/java/org/apache/helix/controller/strategy/TestShufflingTwoStateStrategy.java similarity index 70% rename from helix-core/src/test/java/org/apache/helix/TestShuffledIdealState.java rename to helix-core/src/test/java/org/apache/helix/controller/strategy/TestShufflingTwoStateStrategy.java index 5c158f8d10..0269764204 100644 --- a/helix-core/src/test/java/org/apache/helix/TestShuffledIdealState.java +++ b/helix-core/src/test/java/org/apache/helix/controller/strategy/TestShufflingTwoStateStrategy.java @@ -1,4 +1,4 @@ -package org.apache.helix; +package org.apache.helix.controller.strategy; /* * Licensed to the Apache Software Foundation (ASF) under one @@ -29,9 +29,6 @@ import java.util.Map; import org.apache.helix.ZNRecord; -import org.apache.helix.tools.IdealCalculatorByConsistentHashing; -import org.apache.helix.tools.IdealStateCalculatorByRush; -import org.apache.helix.tools.IdealStateCalculatorByShuffling; import org.codehaus.jackson.JsonGenerationException; import org.codehaus.jackson.map.JsonMappingException; import org.codehaus.jackson.map.ObjectMapper; @@ -39,7 +36,7 @@ import org.testng.AssertJUnit; import org.testng.annotations.Test; -public class TestShuffledIdealState { +public class TestShufflingTwoStateStrategy { @Test() public void testInvocation() throws Exception { int partitions = 6, replicas 
= 2; @@ -51,22 +48,20 @@ public void testInvocation() throws Exception { instanceNames.add("localhost_1234"); ZNRecord result = - IdealStateCalculatorByShuffling.calculateIdealState(instanceNames, partitions, replicas, - dbName); - IdealCalculatorByConsistentHashing.printIdealStateStats(result, "MASTER"); - IdealCalculatorByConsistentHashing.printIdealStateStats(result, "SLAVE"); + ShufflingTwoStateStrategy.calculateIdealState(instanceNames, partitions, replicas, dbName); + ConsistentHashingMasterSlaveStrategy.printIdealStateStats(result, "MASTER"); + ConsistentHashingMasterSlaveStrategy.printIdealStateStats(result, "SLAVE"); ZNRecord result2 = - IdealStateCalculatorByRush.calculateIdealState(instanceNames, 1, partitions, replicas, - dbName); + RUSHMasterSlaveStrategy.calculateIdealState(instanceNames, 1, partitions, replicas, dbName); ZNRecord result3 = - IdealCalculatorByConsistentHashing.calculateIdealState(instanceNames, partitions, replicas, - dbName, new IdealCalculatorByConsistentHashing.FnvHash()); - IdealCalculatorByConsistentHashing.printIdealStateStats(result3, "MASTER"); - IdealCalculatorByConsistentHashing.printIdealStateStats(result3, "SLAVE"); - IdealCalculatorByConsistentHashing.printIdealStateStats(result3, ""); - IdealCalculatorByConsistentHashing.printNodeOfflineOverhead(result3); + ConsistentHashingMasterSlaveStrategy.calculateIdealState(instanceNames, partitions, + replicas, dbName, new ConsistentHashingMasterSlaveStrategy.FnvHash()); + ConsistentHashingMasterSlaveStrategy.printIdealStateStats(result3, "MASTER"); + ConsistentHashingMasterSlaveStrategy.printIdealStateStats(result3, "SLAVE"); + ConsistentHashingMasterSlaveStrategy.printIdealStateStats(result3, ""); + ConsistentHashingMasterSlaveStrategy.printNodeOfflineOverhead(result3); // System.out.println(result); ObjectMapper mapper = new ObjectMapper(); @@ -125,10 +120,9 @@ public void testShuffledIdealState() { instanceNames.add("localhost_1234"); ZNRecord result = - 
IdealStateCalculatorByShuffling.calculateIdealState(instanceNames, partitions, replicas, - dbName); - IdealCalculatorByConsistentHashing.printIdealStateStats(result, "MASTER"); - IdealCalculatorByConsistentHashing.printIdealStateStats(result, "SLAVE"); + ShufflingTwoStateStrategy.calculateIdealState(instanceNames, partitions, replicas, dbName); + ConsistentHashingMasterSlaveStrategy.printIdealStateStats(result, "MASTER"); + ConsistentHashingMasterSlaveStrategy.printIdealStateStats(result, "SLAVE"); Assert.assertTrue(verify(result)); // partition is less than nodes @@ -141,10 +135,9 @@ public void testShuffledIdealState() { instanceNames.add("localhost_" + (1231 + i)); } result = - IdealStateCalculatorByShuffling.calculateIdealState(instanceNames, partitions, replicas, - dbName); - IdealCalculatorByConsistentHashing.printIdealStateStats(result, "MASTER"); - IdealCalculatorByConsistentHashing.printIdealStateStats(result, "SLAVE"); + ShufflingTwoStateStrategy.calculateIdealState(instanceNames, partitions, replicas, dbName); + ConsistentHashingMasterSlaveStrategy.printIdealStateStats(result, "MASTER"); + ConsistentHashingMasterSlaveStrategy.printIdealStateStats(result, "SLAVE"); Assert.assertTrue(verify(result)); // partitions is multiple of nodes @@ -157,10 +150,9 @@ public void testShuffledIdealState() { instanceNames.add("localhost_" + (1231 + i)); } result = - IdealStateCalculatorByShuffling.calculateIdealState(instanceNames, partitions, replicas, - dbName); - IdealCalculatorByConsistentHashing.printIdealStateStats(result, "MASTER"); - IdealCalculatorByConsistentHashing.printIdealStateStats(result, "SLAVE"); + ShufflingTwoStateStrategy.calculateIdealState(instanceNames, partitions, replicas, dbName); + ConsistentHashingMasterSlaveStrategy.printIdealStateStats(result, "MASTER"); + ConsistentHashingMasterSlaveStrategy.printIdealStateStats(result, "SLAVE"); Assert.assertTrue(verify(result)); // nodes are multiple of partitions @@ -173,10 +165,9 @@ public void 
testShuffledIdealState() { instanceNames.add("localhost_" + (1231 + i)); } result = - IdealStateCalculatorByShuffling.calculateIdealState(instanceNames, partitions, replicas, - dbName); - IdealCalculatorByConsistentHashing.printIdealStateStats(result, "MASTER"); - IdealCalculatorByConsistentHashing.printIdealStateStats(result, "SLAVE"); + ShufflingTwoStateStrategy.calculateIdealState(instanceNames, partitions, replicas, dbName); + ConsistentHashingMasterSlaveStrategy.printIdealStateStats(result, "MASTER"); + ConsistentHashingMasterSlaveStrategy.printIdealStateStats(result, "SLAVE"); Assert.assertTrue(verify(result)); // nodes are multiple of partitions @@ -189,10 +180,9 @@ public void testShuffledIdealState() { instanceNames.add("localhost_" + (1231 + i)); } result = - IdealStateCalculatorByShuffling.calculateIdealState(instanceNames, partitions, replicas, - dbName); - IdealCalculatorByConsistentHashing.printIdealStateStats(result, "MASTER"); - IdealCalculatorByConsistentHashing.printIdealStateStats(result, "SLAVE"); + ShufflingTwoStateStrategy.calculateIdealState(instanceNames, partitions, replicas, dbName); + ConsistentHashingMasterSlaveStrategy.printIdealStateStats(result, "MASTER"); + ConsistentHashingMasterSlaveStrategy.printIdealStateStats(result, "SLAVE"); Assert.assertTrue(verify(result)); // Just fits @@ -205,10 +195,9 @@ public void testShuffledIdealState() { instanceNames.add("localhost_" + (1231 + i)); } result = - IdealStateCalculatorByShuffling.calculateIdealState(instanceNames, partitions, replicas, - dbName); - IdealCalculatorByConsistentHashing.printIdealStateStats(result, "MASTER"); - IdealCalculatorByConsistentHashing.printIdealStateStats(result, "SLAVE"); + ShufflingTwoStateStrategy.calculateIdealState(instanceNames, partitions, replicas, dbName); + ConsistentHashingMasterSlaveStrategy.printIdealStateStats(result, "MASTER"); + ConsistentHashingMasterSlaveStrategy.printIdealStateStats(result, "SLAVE"); Assert.assertTrue(verify(result)); } diff 
--git a/helix-core/src/test/java/org/apache/helix/integration/TestAutoIsWithEmptyMap.java b/helix-core/src/test/java/org/apache/helix/integration/TestAutoIsWithEmptyMap.java index 1ffb86fda3..a00881484f 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestAutoIsWithEmptyMap.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestAutoIsWithEmptyMap.java @@ -28,10 +28,10 @@ import org.apache.helix.TestHelper; import org.apache.helix.ZNRecord; import org.apache.helix.controller.HelixControllerMain; +import org.apache.helix.controller.strategy.DefaultTwoStateStrategy; import org.apache.helix.mock.participant.MockParticipant; import org.apache.helix.model.IdealState; import org.apache.helix.tools.ClusterStateVerifier; -import org.apache.helix.tools.DefaultIdealStateCalculator; import org.apache.helix.tools.ClusterStateVerifier.BestPossAndExtViewZkVerifier; import org.testng.Assert; import org.testng.annotations.Test; @@ -63,7 +63,7 @@ public void testAutoIsWithEmptyMap() throws Exception { instanceNames.add("localhost_" + port); } ZNRecord idealState = - DefaultIdealStateCalculator.calculateIdealState(instanceNames, 10, 2, "TestDB0", "LEADER", + DefaultTwoStateStrategy.calculateIdealState(instanceNames, 10, 2, "TestDB0", "LEADER", "STANDBY"); // System.out.println(idealState); // curIdealState.setSimpleField(IdealState.IdealStateProperty.IDEAL_STATE_MODE.toString(), diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestDriver.java b/helix-core/src/test/java/org/apache/helix/integration/TestDriver.java index 951607bc8e..b29e25dbaa 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestDriver.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestDriver.java @@ -32,9 +32,10 @@ import org.apache.helix.PropertyPathConfig; import org.apache.helix.PropertyType; import org.apache.helix.TestHelper; -import org.apache.helix.ZNRecord; import org.apache.helix.TestHelper.StartCMResult; +import 
org.apache.helix.ZNRecord; import org.apache.helix.controller.HelixControllerMain; +import org.apache.helix.controller.strategy.DefaultTwoStateStrategy; import org.apache.helix.manager.zk.ZNRecordSerializer; import org.apache.helix.manager.zk.ZkClient; import org.apache.helix.model.IdealState.IdealStateProperty; @@ -43,14 +44,13 @@ import org.apache.helix.store.PropertyStoreException; import org.apache.helix.tools.ClusterSetup; import org.apache.helix.tools.ClusterStateVerifier; -import org.apache.helix.tools.DefaultIdealStateCalculator; import org.apache.helix.tools.TestCommand; -import org.apache.helix.tools.TestExecutor; -import org.apache.helix.tools.TestTrigger; -import org.apache.helix.tools.ZnodeOpArg; import org.apache.helix.tools.TestCommand.CommandType; import org.apache.helix.tools.TestCommand.NodeOpArg; +import org.apache.helix.tools.TestExecutor; import org.apache.helix.tools.TestExecutor.ZnodePropertyType; +import org.apache.helix.tools.TestTrigger; +import org.apache.helix.tools.ZnodeOpArg; import org.apache.log4j.Logger; import org.testng.Assert; @@ -331,8 +331,8 @@ public static void setIdealState(String uniqClusterName, long beginTime, int per for (int i = 0; i < testInfo._numDb; i++) { String dbName = TEST_DB_PREFIX + i; ZNRecord destIS = - DefaultIdealStateCalculator.calculateIdealState(instanceNames, - testInfo._numPartitionsPerDb, testInfo._replica - 1, dbName, "MASTER", "SLAVE"); + DefaultTwoStateStrategy.calculateIdealState(instanceNames, testInfo._numPartitionsPerDb, + testInfo._replica - 1, dbName, "MASTER", "SLAVE"); // destIS.setId(dbName); destIS.setSimpleField(IdealStateProperty.REBALANCE_MODE.toString(), RebalanceMode.CUSTOMIZED.toString()); diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestExpandCluster.java b/helix-core/src/test/java/org/apache/helix/integration/TestExpandCluster.java index a9aa3b9334..8268679638 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestExpandCluster.java +++ 
b/helix-core/src/test/java/org/apache/helix/integration/TestExpandCluster.java @@ -21,7 +21,7 @@ import java.util.Map; -import org.apache.helix.TestEspressoStorageClusterIdealState; +import org.apache.helix.controller.strategy.TestEspressoStorageClusterIdealState; import org.apache.helix.model.IdealState; import org.apache.helix.tools.ClusterSetup; import org.apache.helix.util.RebalanceUtil; diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestRenamePartition.java b/helix-core/src/test/java/org/apache/helix/integration/TestRenamePartition.java index ed056ab6c6..c3133cc79f 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestRenamePartition.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestRenamePartition.java @@ -24,17 +24,17 @@ import java.util.List; import java.util.Map; +import org.apache.helix.PropertyKey.Builder; import org.apache.helix.TestHelper; import org.apache.helix.ZNRecord; -import org.apache.helix.PropertyKey.Builder; import org.apache.helix.controller.HelixControllerMain; +import org.apache.helix.controller.strategy.DefaultTwoStateStrategy; import org.apache.helix.manager.zk.ZKHelixDataAccessor; import org.apache.helix.manager.zk.ZkBaseDataAccessor; import org.apache.helix.mock.participant.MockParticipant; import org.apache.helix.model.IdealState; import org.apache.helix.model.IdealState.RebalanceMode; import org.apache.helix.tools.ClusterStateVerifier; -import org.apache.helix.tools.DefaultIdealStateCalculator; import org.testng.Assert; import org.testng.annotations.Test; @@ -95,8 +95,8 @@ public void testRenamePartitionCustomIS() throws Exception { Arrays.asList("localhost_12918", "localhost_12919", "localhost_12920", "localhost_12921", "localhost_12922"); ZNRecord destIS = - DefaultIdealStateCalculator.calculateIdealState(instanceNames, 10, 3 - 1, "TestDB0", - "MASTER", "SLAVE"); + DefaultTwoStateStrategy.calculateIdealState(instanceNames, 10, 3 - 1, "TestDB0", "MASTER", + "SLAVE"); IdealState 
idealState = new IdealState(destIS); idealState.setRebalanceMode(RebalanceMode.CUSTOMIZED); idealState.setReplicas("3"); diff --git a/helix-core/src/test/java/org/apache/helix/josql/TestClusterJosqlQueryProcessor.java b/helix-core/src/test/java/org/apache/helix/josql/TestClusterJosqlQueryProcessor.java index 8090201a3e..30c23fb081 100644 --- a/helix-core/src/test/java/org/apache/helix/josql/TestClusterJosqlQueryProcessor.java +++ b/helix-core/src/test/java/org/apache/helix/josql/TestClusterJosqlQueryProcessor.java @@ -28,10 +28,8 @@ import org.apache.helix.Criteria; import org.apache.helix.InstanceType; import org.apache.helix.ZNRecord; -import org.apache.helix.josql.ZNRecordJosqlFunctionHandler; -import org.apache.helix.josql.ZNRecordRow; +import org.apache.helix.controller.strategy.DefaultTwoStateStrategy; import org.apache.helix.model.LiveInstance.LiveInstanceProperty; -import org.apache.helix.tools.DefaultIdealStateCalculator; import org.josql.Query; import org.josql.QueryExecutionException; import org.josql.QueryParseException; @@ -59,9 +57,7 @@ public void queryClusterDataSample() { // liveInstances.remove(0); ZNRecord externalView = - DefaultIdealStateCalculator.calculateIdealState(instances, 21, 3, "TestDB", "MASTER", - "SLAVE"); - + DefaultTwoStateStrategy.calculateIdealState(instances, 21, 3, "TestDB", "MASTER", "SLAVE"); Criteria criteria = new Criteria(); criteria.setInstanceName("%"); criteria.setResource("TestDB"); diff --git a/helix-core/src/test/java/org/apache/helix/messaging/TestDefaultMessagingService.java b/helix-core/src/test/java/org/apache/helix/messaging/TestDefaultMessagingService.java index 9686e16d48..e00b5acd5b 100644 --- a/helix-core/src/test/java/org/apache/helix/messaging/TestDefaultMessagingService.java +++ b/helix-core/src/test/java/org/apache/helix/messaging/TestDefaultMessagingService.java @@ -33,13 +33,13 @@ import org.apache.helix.PropertyKey; import org.apache.helix.PropertyType; import org.apache.helix.ZNRecord; +import 
org.apache.helix.controller.strategy.DefaultTwoStateStrategy; import org.apache.helix.messaging.handling.HelixTaskResult; import org.apache.helix.messaging.handling.MessageHandler; import org.apache.helix.messaging.handling.MessageHandlerFactory; import org.apache.helix.model.ExternalView; import org.apache.helix.model.LiveInstance.LiveInstanceProperty; import org.apache.helix.model.Message; -import org.apache.helix.tools.DefaultIdealStateCalculator; import org.testng.AssertJUnit; import org.testng.annotations.Test; @@ -94,7 +94,7 @@ public MockHelixManager() { _liveInstances.add(metaData); } _externalView = - DefaultIdealStateCalculator.calculateIdealState(_instances, _partitions, _replicas, _db, + DefaultTwoStateStrategy.calculateIdealState(_instances, _partitions, _replicas, _db, "MASTER", "SLAVE"); } diff --git a/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestClusterStatusMonitor.java b/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestClusterStatusMonitor.java index 711aff2c4b..facb4ea9df 100644 --- a/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestClusterStatusMonitor.java +++ b/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestClusterStatusMonitor.java @@ -29,8 +29,8 @@ import org.apache.helix.NotificationContext; import org.apache.helix.PropertyType; import org.apache.helix.ZNRecord; +import org.apache.helix.controller.strategy.DefaultTwoStateStrategy; import org.apache.helix.model.LiveInstance.LiveInstanceProperty; -import org.apache.helix.tools.DefaultIdealStateCalculator; import org.testng.annotations.Test; public class TestClusterStatusMonitor { @@ -50,13 +50,11 @@ public MockDataAccessor() { _instances.add(instance); } ZNRecord externalView = - DefaultIdealStateCalculator.calculateIdealState(_instances, _partitions, _replicas, _db, + DefaultTwoStateStrategy.calculateIdealState(_instances, _partitions, _replicas, _db, "MASTER", "SLAVE"); ZNRecord externalView2 = - 
DefaultIdealStateCalculator.calculateIdealState(_instances, 80, 2, _db2, "MASTER", - "SLAVE"); - + DefaultTwoStateStrategy.calculateIdealState(_instances, 80, 2, _db2, "MASTER", "SLAVE"); } public ZNRecord getProperty(PropertyType type, String resource) { @@ -100,12 +98,11 @@ public void TestReportData() { _liveInstances.add(metaData); } ZNRecord externalView = - DefaultIdealStateCalculator.calculateIdealState(_instances, _partitions, _replicas, _db, + DefaultTwoStateStrategy.calculateIdealState(_instances, _partitions, _replicas, _db, "MASTER", "SLAVE"); ZNRecord externalView2 = - DefaultIdealStateCalculator.calculateIdealState(_instances, 80, 2, "TestDB", "MASTER", - "SLAVE"); + DefaultTwoStateStrategy.calculateIdealState(_instances, 80, 2, "TestDB", "MASTER", "SLAVE"); List externalViews = new ArrayList(); externalViews.add(externalView); diff --git a/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestResourceMonitor.java b/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestResourceMonitor.java index d631dd2e94..6712c40019 100644 --- a/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestResourceMonitor.java +++ b/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestResourceMonitor.java @@ -31,11 +31,11 @@ import org.apache.helix.PropertyType; import org.apache.helix.ZNRecord; import org.apache.helix.PropertyKey.Builder; +import org.apache.helix.controller.strategy.DefaultTwoStateStrategy; import org.apache.helix.model.ExternalView; import org.apache.helix.model.IdealState; import org.apache.helix.model.LiveInstance.LiveInstanceProperty; import org.apache.helix.monitoring.mbeans.ResourceMonitor; -import org.apache.helix.tools.DefaultIdealStateCalculator; import org.testng.AssertJUnit; import org.testng.annotations.Test; @@ -94,8 +94,8 @@ public MockHelixManager() { } _idealState = - DefaultIdealStateCalculator.calculateIdealState(_instances, _partitions, _replicas, - _dbName, "MASTER", "SLAVE"); + 
DefaultTwoStateStrategy.calculateIdealState(_instances, _partitions, _replicas, _dbName, + "MASTER", "SLAVE"); _externalView = new ZNRecord(_idealState); } From 934bb0f422c385a00a0638594d6e6da2c93b2906 Mon Sep 17 00:00:00 2001 From: zzhang Date: Mon, 26 Aug 2013 16:40:26 -0700 Subject: [PATCH 005/113] [HELIX-214] User-defined rebalancer should never use SEMI_AUTO code paths, rb=13785 --- .../apache/helix/manager/zk/ZKHelixAdmin.java | 8 ++--- .../org/apache/helix/model/IdealState.java | 18 +++++----- .../TestCustomizedIdealStateRebalancer.java | 33 +++++++++++++------ 3 files changed, 34 insertions(+), 25 deletions(-) diff --git a/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixAdmin.java b/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixAdmin.java index 5da5b275d8..754df7be0b 100644 --- a/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixAdmin.java +++ b/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixAdmin.java @@ -235,8 +235,6 @@ public void enablePartition(final boolean enabled, final String clusterName, for (String partitionName : partitionNames) { if ((idealState.getRebalanceMode() == RebalanceMode.SEMI_AUTO && idealState .getPreferenceList(partitionName) == null) - || (idealState.getRebalanceMode() == RebalanceMode.USER_DEFINED && idealState - .getPreferenceList(partitionName) == null) || (idealState.getRebalanceMode() == RebalanceMode.CUSTOMIZED && idealState .getInstanceStateMap(partitionName) == null)) { logger.warn("Cluster: " + clusterName + ", resource: " + resourceName + ", partition: " @@ -1021,14 +1019,14 @@ void rebalance(String clusterName, String resourceName, int replica, String keyP if (masterStateValue == null) { masterStateValue = slaveStateValue; } - if (idealState.getRebalanceMode() != RebalanceMode.FULL_AUTO) { + if (idealState.getRebalanceMode() == RebalanceMode.SEMI_AUTO + || idealState.getRebalanceMode() == RebalanceMode.CUSTOMIZED) { ZNRecord newIdealState = 
DefaultTwoStateStrategy.calculateIdealState(instanceNames, partitions, replica, keyPrefix, masterStateValue, slaveStateValue); // for now keep mapField in SEMI_AUTO mode and remove listField in CUSTOMIZED mode - if (idealState.getRebalanceMode() == RebalanceMode.SEMI_AUTO - || idealState.getRebalanceMode() == RebalanceMode.USER_DEFINED) { + if (idealState.getRebalanceMode() == RebalanceMode.SEMI_AUTO) { idealState.getRecord().setListFields(newIdealState.getListFields()); idealState.getRecord().setMapFields(newIdealState.getMapFields()); } diff --git a/helix-core/src/main/java/org/apache/helix/model/IdealState.java b/helix-core/src/main/java/org/apache/helix/model/IdealState.java index 59bc59c608..e14940a17e 100644 --- a/helix-core/src/main/java/org/apache/helix/model/IdealState.java +++ b/helix-core/src/main/java/org/apache/helix/model/IdealState.java @@ -30,9 +30,8 @@ import org.apache.helix.HelixConstants; import org.apache.helix.HelixProperty; import org.apache.helix.ZNRecord; -import org.apache.log4j.Logger; - import org.apache.helix.controller.rebalancer.Rebalancer; +import org.apache.log4j.Logger; /** * The ideal states of all partitions in a resource @@ -207,10 +206,10 @@ public void setPartitionState(String partitionName, String instanceName, String */ public Set getPartitionSet() { if (getRebalanceMode() == RebalanceMode.SEMI_AUTO - || getRebalanceMode() == RebalanceMode.FULL_AUTO - || getRebalanceMode() == RebalanceMode.USER_DEFINED) { + || getRebalanceMode() == RebalanceMode.FULL_AUTO) { return _record.getListFields().keySet(); - } else if (getRebalanceMode() == RebalanceMode.CUSTOMIZED) { + } else if (getRebalanceMode() == RebalanceMode.CUSTOMIZED + || getRebalanceMode() == RebalanceMode.USER_DEFINED) { return _record.getMapFields().keySet(); } else { logger.error("Invalid ideal state mode:" + getResourceName()); @@ -243,8 +242,7 @@ public Map getInstanceStateMap(String partitionName) { */ public Set getInstanceSet(String partitionName) { if 
(getRebalanceMode() == RebalanceMode.SEMI_AUTO - || getRebalanceMode() == RebalanceMode.FULL_AUTO - || getRebalanceMode() == RebalanceMode.USER_DEFINED) { + || getRebalanceMode() == RebalanceMode.FULL_AUTO) { List prefList = _record.getListField(partitionName); if (prefList != null) { return new TreeSet(prefList); @@ -252,7 +250,8 @@ public Set getInstanceSet(String partitionName) { logger.warn(partitionName + " does NOT exist"); return Collections.emptySet(); } - } else if (getRebalanceMode() == RebalanceMode.CUSTOMIZED) { + } else if (getRebalanceMode() == RebalanceMode.CUSTOMIZED + || getRebalanceMode() == RebalanceMode.USER_DEFINED) { Map stateMap = _record.getMapField(partitionName); if (stateMap != null) { return new TreeSet(stateMap.keySet()); @@ -418,8 +417,7 @@ public boolean isValid() { return false; } - if (getRebalanceMode() == RebalanceMode.SEMI_AUTO - || getRebalanceMode() == RebalanceMode.USER_DEFINED) { + if (getRebalanceMode() == RebalanceMode.SEMI_AUTO) { String replicaStr = getReplicas(); if (replicaStr == null) { logger.error("invalid ideal-state. missing replicas in auto mode. record was: " + _record); diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestCustomizedIdealStateRebalancer.java b/helix-core/src/test/java/org/apache/helix/integration/TestCustomizedIdealStateRebalancer.java index 70ff6bd029..55fc876f85 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestCustomizedIdealStateRebalancer.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestCustomizedIdealStateRebalancer.java @@ -19,7 +19,9 @@ * under the License. 
*/ +import java.util.ArrayList; import java.util.HashMap; +import java.util.List; import java.util.Map; import org.apache.helix.HelixDataAccessor; @@ -39,6 +41,7 @@ import org.apache.helix.model.Partition; import org.apache.helix.model.Resource; import org.apache.helix.model.ResourceAssignment; +import org.apache.helix.model.StateModelDefinition; import org.apache.helix.tools.ClusterStateVerifier; import org.apache.helix.tools.ClusterStateVerifier.ZkVerifier; import org.testng.Assert; @@ -48,6 +51,7 @@ public class TestCustomizedIdealStateRebalancer extends ZkStandAloneCMTestBaseWithPropertyServerCheck { String db2 = TEST_DB + "2"; static boolean testRebalancerCreated = false; + static boolean testRebalancerInvoked = false; public static class TestRebalancer implements Rebalancer { @@ -56,22 +60,30 @@ public void init(HelixManager manager) { testRebalancerCreated = true; } + /** + * Very basic mapping that evenly assigns one replica of each partition to live nodes, each of + * which is in the highest-priority state. 
+ */ @Override - public ResourceAssignment computeResourceMapping(Resource resource, IdealState currentIdealState, - CurrentStateOutput currentStateOutput, ClusterDataCache clusterData) { + public ResourceAssignment computeResourceMapping(Resource resource, + IdealState currentIdealState, CurrentStateOutput currentStateOutput, + ClusterDataCache clusterData) { + List liveInstances = new ArrayList(clusterData.getLiveInstances().keySet()); + String stateModelName = currentIdealState.getStateModelDefRef(); + StateModelDefinition stateModelDef = clusterData.getStateModelDef(stateModelName); ResourceAssignment resourceMapping = new ResourceAssignment(resource.getResourceName()); + int i = 0; for (Partition partition : resource.getPartitions()) { String partitionName = partition.getPartitionName(); - String instance = currentIdealState.getPreferenceList(partitionName).get(0); - currentIdealState.getPreferenceList(partitionName).clear(); - currentIdealState.getPreferenceList(partitionName).add(instance); - + int nodeIndex = i % liveInstances.size(); currentIdealState.getInstanceStateMap(partitionName).clear(); - currentIdealState.getInstanceStateMap(partitionName).put(instance, "MASTER"); + currentIdealState.getInstanceStateMap(partitionName).put(liveInstances.get(nodeIndex), + stateModelDef.getStatesPriorityList().get(0)); resourceMapping.addReplicaMap(partition, currentIdealState.getInstanceStateMap(partitionName)); + i++; } - currentIdealState.setReplicas("1"); + testRebalancerInvoked = true; return resourceMapping; } } @@ -102,10 +114,11 @@ public void testCustomizedIdealStateRebalancer() throws InterruptedException { } IdealState is = accessor.getProperty(keyBuilder.idealStates(db2)); for (String partition : is.getPartitionSet()) { - Assert.assertEquals(is.getPreferenceList(partition).size(), 3); - Assert.assertEquals(is.getInstanceStateMap(partition).size(), 3); + Assert.assertEquals(is.getPreferenceList(partition).size(), 0); + 
Assert.assertEquals(is.getInstanceStateMap(partition).size(), 0); } Assert.assertTrue(testRebalancerCreated); + Assert.assertTrue(testRebalancerInvoked); } public static class ExternalViewBalancedVerifier implements ZkVerifier { From 7dbb58c563a484728fdd3d7e9564f6bca31e5a57 Mon Sep 17 00:00:00 2001 From: zzhang Date: Tue, 27 Aug 2013 18:03:33 -0700 Subject: [PATCH 006/113] [HELIX-109] Review Helix model package, initial changes --- .../java/org/apache/helix/api/Cluster.java | 174 +++++++++++++++ .../org/apache/helix/api/ClusterAccessor.java | 86 ++++++++ .../org/apache/helix/api/ClusterConfig.java | 24 ++ .../java/org/apache/helix/api/ClusterId.java | 40 ++++ .../org/apache/helix/api/ClusterReader.java | 154 +++++++++++++ .../java/org/apache/helix/api/Controller.java | 73 +++++++ .../apache/helix/api/ControllerAccessor.java | 37 ++++ .../org/apache/helix/api/ControllerId.java | 33 +++ .../java/org/apache/helix/api/CurState.java | 78 +++++++ .../apache/helix/api/CurStateAccessor.java | 45 ++++ .../java/org/apache/helix/api/ExtView.java | 62 ++++++ .../org/apache/helix/api/ExtViewAccessor.java | 45 ++++ .../org/apache/helix/api/HelixVersion.java | 51 +++++ .../main/java/org/apache/helix/api/Id.java | 54 +++++ .../main/java/org/apache/helix/api/Msg.java | 68 ++++++ .../main/java/org/apache/helix/api/MsgId.java | 34 +++ .../org/apache/helix/api/Participant.java | 205 ++++++++++++++++++ .../apache/helix/api/ParticipantAccessor.java | 98 +++++++++ .../org/apache/helix/api/ParticipantId.java | 33 +++ .../java/org/apache/helix/api/Partition.java | 44 ++++ .../org/apache/helix/api/PartitionId.java | 51 +++++ .../java/org/apache/helix/api/ProcId.java | 35 +++ .../apache/helix/api/RebalancerConfig.java | 62 ++++++ .../org/apache/helix/api/RebalancerRef.java | 51 +++++ .../java/org/apache/helix/api/Resource.java | 89 ++++++++ .../apache/helix/api/ResourceAccessor.java | 45 ++++ .../java/org/apache/helix/api/ResourceId.java | 33 +++ .../org/apache/helix/api/RscAssignment.java | 
44 ++++ .../org/apache/helix/api/RunningInstance.java | 67 ++++++ .../java/org/apache/helix/api/SessionId.java | 34 +++ .../java/org/apache/helix/api/Spectator.java | 43 ++++ .../org/apache/helix/api/SpectatorId.java | 34 +++ .../main/java/org/apache/helix/api/State.java | 36 +++ .../org/apache/helix/api/StateModelDefId.java | 34 +++ 34 files changed, 2096 insertions(+) create mode 100644 helix-core/src/main/java/org/apache/helix/api/Cluster.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/ClusterAccessor.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/ClusterConfig.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/ClusterId.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/ClusterReader.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/Controller.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/ControllerAccessor.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/ControllerId.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/CurState.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/CurStateAccessor.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/ExtView.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/ExtViewAccessor.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/HelixVersion.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/Id.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/Msg.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/MsgId.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/Participant.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/ParticipantAccessor.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/ParticipantId.java create mode 100644 
helix-core/src/main/java/org/apache/helix/api/Partition.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/PartitionId.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/ProcId.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/RebalancerConfig.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/RebalancerRef.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/Resource.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/ResourceAccessor.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/ResourceId.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/RscAssignment.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/RunningInstance.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/SessionId.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/Spectator.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/SpectatorId.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/State.java create mode 100644 helix-core/src/main/java/org/apache/helix/api/StateModelDefId.java diff --git a/helix-core/src/main/java/org/apache/helix/api/Cluster.java b/helix-core/src/main/java/org/apache/helix/api/Cluster.java new file mode 100644 index 0000000000..5b149d954e --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/Cluster.java @@ -0,0 +1,174 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import org.apache.helix.model.CurrentState; +import org.apache.helix.model.IdealState; +import org.apache.helix.model.LiveInstance; +import org.apache.helix.model.InstanceConfig; +import org.apache.helix.model.Message; + +import com.google.common.collect.ImmutableMap; + +/** + * Represent a logical helix cluster + */ +public class Cluster { + private final ClusterId _id; + + /** + * map of resource-id to resource + */ + private final Map _resourceMap; + + /** + * map of participant-id to participant + */ + private final Map _participantMap; + + /** + * map of controller-id to controller + */ + private final Map _controllerMap; + + /** + * map of spectator-id to spectator + */ + private final Map _spectatorMap; + + private final ControllerId _leaderId; + + private final ClusterConfig _config = null; + + // TODO move construct logic to ClusterAccessor + /** + * Construct a cluster + * @param id a unique id for the cluster + * @param idealStateMap map of resource-id to ideal-state + * @param currentStateMap map of resource-id to map of participant-id to current-state + * @param instanceConfigMap map of participant-id to instance-config + * @param liveInstanceMap map of participant-id to live-instance + * @param msgMap map of participant-id to map of message-id to message + * @param leader + */ + public Cluster(ClusterId id, Map idealStateMap, + Map> currentStateMap, + Map instanceConfigMap, Map liveInstanceMap, + Map> msgMap, LiveInstance leader) { + _id = id; + 
+ Map resourceMap = new HashMap(); + for (String resourceId : idealStateMap.keySet()) { + IdealState idealState = idealStateMap.get(resourceId); + Map curStateMap = currentStateMap.get(resourceId); + + // TODO pass resource assignment + resourceMap.put(new ResourceId(resourceId), new Resource(new ResourceId(resourceId), + idealState, null)); + } + _resourceMap = ImmutableMap.copyOf(resourceMap); + + Map participantMap = new HashMap(); + for (String participantId : instanceConfigMap.keySet()) { + InstanceConfig instanceConfig = instanceConfigMap.get(participantId); + LiveInstance liveInstance = liveInstanceMap.get(participantId); + Map instanceMsgMap = msgMap.get(participantId); + + // TODO pass current-state map + participantMap.put(new ParticipantId(participantId), new Participant(new ParticipantId( + participantId), instanceConfig, liveInstance, null, instanceMsgMap)); + } + _participantMap = ImmutableMap.copyOf(participantMap); + + Map controllerMap = new HashMap(); + if (leader != null) { + _leaderId = new ControllerId(leader.getId()); + controllerMap.put(_leaderId, new Controller(_leaderId, leader, true)); + } else { + _leaderId = null; + } + + // TODO impl this when we persist controllers and spectators on zookeeper + _controllerMap = ImmutableMap.copyOf(controllerMap); + _spectatorMap = Collections.emptyMap(); + } + + /** + * Get cluster id + * @return cluster id + */ + public ClusterId getId() { + return _id; + } + + /** + * Get resources in the cluster + * @return a map of resource id to resource, or empty map if none + */ + public Map getResourceMap() { + return _resourceMap; + } + + /** + * Get resource given resource id + * @param resourceId + * @return resource or null if not exist + */ + public Resource getResource(ResourceId resourceId) { + return _resourceMap.get(resourceId); + } + + /** + * Get participants of the cluster + * @return a map of participant id to participant, or empty map if none + */ + public Map getParticipantMap() { + return 
_participantMap; + } + + /** + * Get controllers of the cluster + * @return a map of controller id to controller, or empty map if none + */ + public Map getControllerMap() { + return _controllerMap; + } + + /** + * Get the leader of the cluster + * @return the leader or null if not exist + */ + public Controller getLeader() { + return _controllerMap.get(_leaderId); + } + + /** + * Get spectators of the cluster + * @return a map of spectator id to spectator, or empty map if none + */ + public Map getSpectatorMap() { + return _spectatorMap; + } + +} diff --git a/helix-core/src/main/java/org/apache/helix/api/ClusterAccessor.java b/helix-core/src/main/java/org/apache/helix/api/ClusterAccessor.java new file mode 100644 index 0000000000..1b43e6b03b --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/ClusterAccessor.java @@ -0,0 +1,86 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +import org.apache.helix.HelixDataAccessor; + +public class ClusterAccessor { + private final HelixDataAccessor _accessor; + private final ClusterId _clusterId; + + public ClusterAccessor(HelixDataAccessor accessor) { + _accessor = accessor; + _clusterId = null; + } + + /** + * + */ + public void create() { + + } + + /** + * @return + */ + public Cluster read() { + return null; + } + + /** + */ + public void pause() { + + } + + /** + */ + public void resume() { + + } + + /** + * @param resource + */ + public void addResource(Resource resource) { + + } + + /** + * @param resourceId + */ + public void dropResource(ResourceId resourceId) { + + } + + /** + * @param participant + */ + public void addParticipant(Participant participant) { + + } + + /** + * @param participantId + */ + public void dropParticipant(ParticipantId participantId) { + + } +} diff --git a/helix-core/src/main/java/org/apache/helix/api/ClusterConfig.java b/helix-core/src/main/java/org/apache/helix/api/ClusterConfig.java new file mode 100644 index 0000000000..8a2f6295fa --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/ClusterConfig.java @@ -0,0 +1,24 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +public class ClusterConfig { + +} diff --git a/helix-core/src/main/java/org/apache/helix/api/ClusterId.java b/helix-core/src/main/java/org/apache/helix/api/ClusterId.java new file mode 100644 index 0000000000..4f04c41e0e --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/ClusterId.java @@ -0,0 +1,40 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/** + * + */ +public class ClusterId extends Id { + final private String _id; + + /** + * @param id + */ + public ClusterId(String id) { + _id = id; + } + + @Override + public String stringify() { + return _id; + } + +} diff --git a/helix-core/src/main/java/org/apache/helix/api/ClusterReader.java b/helix-core/src/main/java/org/apache/helix/api/ClusterReader.java new file mode 100644 index 0000000000..12a41ac8f0 --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/ClusterReader.java @@ -0,0 +1,154 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import java.util.HashMap; +import java.util.Map; + +import org.apache.helix.HelixDataAccessor; +import org.apache.helix.PropertyKey; +import org.apache.helix.ZNRecord; +import org.apache.helix.manager.zk.ZKHelixDataAccessor; +import org.apache.helix.manager.zk.ZNRecordSerializer; +import org.apache.helix.manager.zk.ZkBaseDataAccessor; +import org.apache.helix.manager.zk.ZkClient; +import org.apache.helix.model.CurrentState; +import org.apache.helix.model.IdealState; +import org.apache.helix.model.InstanceConfig; +import org.apache.helix.model.LiveInstance; +import org.apache.helix.model.Message; + +/** + * Read a cluster from zookeeper + */ +public class ClusterReader { + final ZkClient _client; + + public ClusterReader(ZkClient client) { + _client = client; + } + + // TODO move to ClusterAccessor + /** + * Read the following znodes from zookeeper and construct a cluster instance + * - all instance-configs + * - all ideal-states + * - all live-instances + * - all messages + * - all current-states + * @param clusterId + * @return cluster or null if not exist + */ + public Cluster readCluster(String clusterId) { + HelixDataAccessor accessor = + new ZKHelixDataAccessor(clusterId, new ZkBaseDataAccessor(_client)); + PropertyKey.Builder keyBuilder = accessor.keyBuilder(); 
+ + /** + * map of instance-id to instance-config + */ + Map instanceConfigMap = + accessor.getChildValuesMap(keyBuilder.instanceConfigs()); + + /** + * map of resource-id to ideal-state + */ + Map idealStateMap = accessor.getChildValuesMap(keyBuilder.idealStates()); + + /** + * map of instance-id to live-instance + */ + Map liveInstanceMap = + accessor.getChildValuesMap(keyBuilder.liveInstances()); + + /** + * map of participant-id to map of message-id to message + */ + Map> messageMap = new HashMap>(); + for (String instanceName : liveInstanceMap.keySet()) { + Map instanceMsgMap = + accessor.getChildValuesMap(keyBuilder.messages(instanceName)); + messageMap.put(instanceName, instanceMsgMap); + } + + /** + * map of resource-id to map of participant-id to current-state + */ + Map> currentStateMap = + new HashMap>(); + for (String participantId : liveInstanceMap.keySet()) { + LiveInstance liveInstance = liveInstanceMap.get(participantId); + String sessionId = liveInstance.getSessionId(); + Map instanceCurStateMap = + accessor.getChildValuesMap(keyBuilder.currentStates(participantId, sessionId)); + + for (String resourceId : instanceCurStateMap.keySet()) { + if (!currentStateMap.containsKey(resourceId)) { + currentStateMap.put(resourceId, new HashMap()); + } + + currentStateMap.get(resourceId).put(participantId, instanceCurStateMap.get(resourceId)); + } + } + + return new Cluster(new ClusterId(clusterId), idealStateMap, currentStateMap, instanceConfigMap, + liveInstanceMap, messageMap, null); + } + + /** + * simple test + * @param args + */ + public static void main(String[] args) { + ZkClient client = + new ZkClient("zzhang-ld", ZkClient.DEFAULT_SESSION_TIMEOUT, + ZkClient.DEFAULT_CONNECTION_TIMEOUT, new ZNRecordSerializer()); + + ClusterReader reader = new ClusterReader(client); + Cluster cluster = reader.readCluster("ESPRESSO_STORAGE"); + + Map participantMap = cluster.getParticipantMap(); + for (ParticipantId participantId : participantMap.keySet()) { + 
Participant participant = participantMap.get(participantId); + System.out.println(participantId + " - " + participant.isEnabled()); + if (participant.isAlive()) { + System.out.println("\t" + participant.getRunningInstance().getSessionId()); + } + } + + Map resourceMap = cluster.getResourceMap(); + for (ResourceId resourceId : resourceMap.keySet()) { + Resource resource = resourceMap.get(resourceId); + // System.out.println(resourceId + " - " + resource.getStateModelDefId()); + + // TODO fix it + // + // Map curStateMap = resource.getCurrentStateMap(); + // for (ParticipantId participantId : curStateMap.keySet()) { + // System.out.println("\t" + participantId); + // CurState curState = curStateMap.get(participantId); + // for (PartitionId partitionId : curState.getPartitionIdSet()) { + // State state = curState.getState(partitionId); + // System.out.println("\t\t" + partitionId + " - " + state); + // } + // } + } + } +} diff --git a/helix-core/src/main/java/org/apache/helix/api/Controller.java b/helix-core/src/main/java/org/apache/helix/api/Controller.java new file mode 100644 index 0000000000..df28571a0a --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/Controller.java @@ -0,0 +1,73 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import org.apache.helix.model.LiveInstance; + +/** + * A helix controller + */ +public class Controller { + private final ControllerId _id; + private final RunningInstance _runningInstance; + private final boolean _isLeader; + + /** + * Construct a controller + * @param id + */ + public Controller(ControllerId id, LiveInstance liveInstance, boolean isLeader) { + _id = id; + + if (liveInstance != null) { + _runningInstance = + new RunningInstance(new SessionId(liveInstance.getSessionId()), new HelixVersion( + liveInstance.getHelixVersion()), new ProcId(liveInstance.getLiveInstance())); + } else { + _runningInstance = null; + } + + _isLeader = isLeader; + } + + /** + * Get controller id + * @return controller id + */ + public ControllerId getId() { + return _id; + } + + /** + * Check if the controller is leader + * @return true if leader or false otherwise + */ + public boolean isLeader() { + return _isLeader; + } + + /** + * Get the running instance + * @return running instance or null if not running + */ + public RunningInstance getRunningInstance() { + return _runningInstance; + } +} diff --git a/helix-core/src/main/java/org/apache/helix/api/ControllerAccessor.java b/helix-core/src/main/java/org/apache/helix/api/ControllerAccessor.java new file mode 100644 index 0000000000..fb3f8446a3 --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/ControllerAccessor.java @@ -0,0 +1,37 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import org.apache.helix.HelixDataAccessor; + +public class ControllerAccessor { + private final HelixDataAccessor _accessor; + + public ControllerAccessor(HelixDataAccessor accessor) { + _accessor = accessor; + } + + /** + * @param controllerId + */ + public void start(ControllerId controllerId) { + + } +} diff --git a/helix-core/src/main/java/org/apache/helix/api/ControllerId.java b/helix-core/src/main/java/org/apache/helix/api/ControllerId.java new file mode 100644 index 0000000000..894c342f49 --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/ControllerId.java @@ -0,0 +1,33 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +public class ControllerId extends Id { + private final String _id; + + public ControllerId(String id) { + _id = id; + } + + @Override + public String stringify() { + return _id; + } +} diff --git a/helix-core/src/main/java/org/apache/helix/api/CurState.java b/helix-core/src/main/java/org/apache/helix/api/CurState.java new file mode 100644 index 0000000000..e66fb7a346 --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/CurState.java @@ -0,0 +1,78 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +import org.apache.helix.model.CurrentState; + +import com.google.common.collect.ImmutableMap; + +/** + * Current state per participant per resource + */ +public class CurState { + private final ResourceId _resourceId; + private final ParticipantId _participantId; + + /** + * map of partition-id to state + */ + final Map _stateMap; + + /** + * Construct current state + * @param resource + * @param participant + * @param currentState + */ + public CurState(ResourceId resourceId, ParticipantId participantId, CurrentState currentState) { + _resourceId = resourceId; + _participantId = participantId; + + Map stateMap = new HashMap(); + Map currentStateMap = currentState.getPartitionStateMap(); + for (String partitionId : currentStateMap.keySet()) { + String state = currentStateMap.get(partitionId); + stateMap.put(new PartitionId(resourceId, PartitionId.stripResourceId(partitionId)), + new State(state)); + } + _stateMap = ImmutableMap.copyOf(stateMap); + } + + /** + * Get current state for a partition + * @param partition-id + * @return state of the partition or null if partition not exist + */ + public State getState(PartitionId partitionId) { + return _stateMap.get(partitionId); + } + + /** + * Get the set of partition-id's in the current state + * @return set of partition-id's or empty set if none + */ + public Set getPartitionIdSet() { + return _stateMap.keySet(); + } +} diff --git a/helix-core/src/main/java/org/apache/helix/api/CurStateAccessor.java b/helix-core/src/main/java/org/apache/helix/api/CurStateAccessor.java new file mode 100644 index 0000000000..1b8e2ce497 --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/CurStateAccessor.java @@ -0,0 +1,45 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import org.apache.helix.HelixDataAccessor; + +public class CurStateAccessor { + private final HelixDataAccessor _accessor; + + public CurStateAccessor(HelixDataAccessor accessor) { + _accessor = accessor; + } + + /** + * @param curStateUpdate current state change delta + */ + public void updateCurState(ParticipantId participantId, ResourceId resourceId, + CurState curStateUpdate) { + // accessor.updateProperty() + } + + /** + * + */ + public void drop(ParticipantId participantId, ResourceId resourceId) { + + } +} diff --git a/helix-core/src/main/java/org/apache/helix/api/ExtView.java b/helix-core/src/main/java/org/apache/helix/api/ExtView.java new file mode 100644 index 0000000000..3a5d4ac040 --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/ExtView.java @@ -0,0 +1,62 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import java.util.Map; + +import com.google.common.collect.ImmutableMap; + +/** + * External view of a resource + */ +public class ExtView { + private final ResourceId _resourceId; + + /** + * map of partition-id to map of participant-id to state + */ + private final RscAssignment _extView; + + /** + * Construct external view + * @param stateMap map of partition-id to map of participant-id to state + */ + public ExtView(ResourceId resourceId, Map> stateMap) { + _resourceId = resourceId; + + // TODO convert to external view + _extView = null; + } + + /** + * Get the state of a partition for a participant + * @param partitionId + * @param participantIds + * @return the state or null if not exist + */ + public State getState(PartitionId partitionId, ParticipantId participantId) { + Map participantStateMap = _extView.getParticipantStateMap(partitionId); + if (participantStateMap != null) { + return participantStateMap.get(participantId); + } + return null; + } + +} diff --git a/helix-core/src/main/java/org/apache/helix/api/ExtViewAccessor.java b/helix-core/src/main/java/org/apache/helix/api/ExtViewAccessor.java new file mode 100644 index 0000000000..41406ff20a --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/ExtViewAccessor.java @@ -0,0 +1,45 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import org.apache.helix.HelixDataAccessor; + +public class ExtViewAccessor { + + private final HelixDataAccessor _accessor; + + public ExtViewAccessor(HelixDataAccessor accessor) { + _accessor = accessor; + } + + /** + * @param extView + */ + public void setExternalView(ExtView extView) { + + } + + /** + * + */ + public void drop(ResourceId resourceId) { + + } +} diff --git a/helix-core/src/main/java/org/apache/helix/api/HelixVersion.java b/helix-core/src/main/java/org/apache/helix/api/HelixVersion.java new file mode 100644 index 0000000000..84d0a8f76b --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/HelixVersion.java @@ -0,0 +1,51 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/** + * Helix version (e.g. 0.6.1.5) + */ +public class HelixVersion { + final String _version; + + /** + * Construct with a version string (e.g. 0.6.1.5) + * @param version + */ + public HelixVersion(String version) { + _version = version; + } + + /** + * Get major version (e.g. 6 in 0.6.1.5) + * @return major version number + */ + public String getMajor() { + return null; + } + + /** + * Get minor version (e.g. 1 in 0.6.1.5) + * @return minor version number + */ + public String getMinor() { + return null; + } +} diff --git a/helix-core/src/main/java/org/apache/helix/api/Id.java b/helix-core/src/main/java/org/apache/helix/api/Id.java new file mode 100644 index 0000000000..96ce15ddb1 --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/Id.java @@ -0,0 +1,54 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +/** + * + */ +public abstract class Id implements Comparable { + public abstract String stringify(); + + @Override + public String toString() { + return stringify(); + } + + @Override + public boolean equals(Object that) { + if (that instanceof Id) { + return this.stringify().equals(((Id) that).stringify()); + } + return false; + } + + @Override + public int hashCode() { + return this.stringify().hashCode(); + } + + @Override + public int compareTo(Id that) { + if (that instanceof Id) { + return this.stringify().compareTo(that.stringify()); + } + return -1; + } + +} diff --git a/helix-core/src/main/java/org/apache/helix/api/Msg.java b/helix-core/src/main/java/org/apache/helix/api/Msg.java new file mode 100644 index 0000000000..ac7638d3fa --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/Msg.java @@ -0,0 +1,68 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +import org.apache.helix.model.Message; + +/** + * Helix message + */ +public class Msg { + private final MsgId _id; + private final SessionId _srcSessionId; + private final SessionId _tgtSessionid; + + // TODO add other message fields + + /** + * Construct a message + * @param message + */ + public Msg(Message message) { + _id = new MsgId(message.getId()); + _srcSessionId = new SessionId(message.getSrcSessionId()); + _tgtSessionid = new SessionId(message.getTgtSessionId()); + } + + /** + * Get message id + * @return message id + */ + public MsgId getId() { + return _id; + } + + /** + * Get sender session id + * @return sender session id + */ + public SessionId getSrcSessionId() { + return _srcSessionId; + } + + /** + * Get receiver session id + * @return receiver session id + */ + public SessionId getTgtSessionId() { + return _tgtSessionid; + } + +} diff --git a/helix-core/src/main/java/org/apache/helix/api/MsgId.java b/helix-core/src/main/java/org/apache/helix/api/MsgId.java new file mode 100644 index 0000000000..88eb4480b2 --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/MsgId.java @@ -0,0 +1,34 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +public class MsgId extends Id { + + private final String _id; + + public MsgId(String id) { + _id = id; + } + + @Override + public String stringify() { + return _id; + } +} diff --git a/helix-core/src/main/java/org/apache/helix/api/Participant.java b/helix-core/src/main/java/org/apache/helix/api/Participant.java new file mode 100644 index 0000000000..e3eb68e73b --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/Participant.java @@ -0,0 +1,205 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.helix.model.CurrentState; +import org.apache.helix.model.InstanceConfig; +import org.apache.helix.model.LiveInstance; +import org.apache.helix.model.Message; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; + +/** + * A cluster participant + */ +public class Participant { + private final ParticipantId _id; + private final String _hostName; + private final int _port; + private final boolean _isEnabled; + + /** + * set of disabled partition id's + */ + private final Set _disabledPartitionIds; + private final Set _tags; + + private final RunningInstance _runningInstance; + + /** + * map of resource-id to current-state + */ + private final Map _currentStateMap; + + /** + * map of message-id to message + */ + private final Map _messageMap; + + // TODO move this to ParticipantAccessor + /** + * Construct a participant + * @param config + */ + public Participant(ParticipantId id, InstanceConfig config, LiveInstance liveInstance, + Map currentStateMap, Map instanceMsgMap) { + _id = id; + _hostName = config.getHostName(); + + int port = -1; + try { + port = Integer.parseInt(config.getPort()); + } catch (IllegalArgumentException e) { + // keep as -1 + } + if (port < 0 || port > 65535) { + port = -1; + } + _port = port; + _isEnabled = config.getInstanceEnabled(); + + List disabledPartitions = config.getDisabledPartitions(); + if (disabledPartitions == null) { + _disabledPartitionIds = Collections.emptySet(); + } else { + Set disabledPartitionSet = new HashSet(); + for (String partitionId : disabledPartitions) { + disabledPartitionSet.add(new PartitionId(PartitionId.extracResourceId(partitionId), + PartitionId.stripResourceId(partitionId))); + } + _disabledPartitionIds = ImmutableSet.copyOf(disabledPartitionSet); + } + + List tags = 
config.getTags(); + if (tags == null) { + _tags = Collections.emptySet(); + } else { + _tags = ImmutableSet.copyOf(config.getTags()); + } + + if (liveInstance != null) { + _runningInstance = + new RunningInstance(new SessionId(liveInstance.getSessionId()), new HelixVersion( + liveInstance.getHelixVersion()), new ProcId(liveInstance.getLiveInstance())); + } else { + _runningInstance = null; + } + + // TODO set curstate + // Map curStateMap = new HashMap(); + // if (currentStateMap != null) { + // for (String participantId : currentStateMap.keySet()) { + // CurState curState = + // new CurState(_id, new ParticipantId(participantId), currentStateMap.get(participantId)); + // curStateMap.put(new ParticipantId(participantId), curState); + // } + // } + // _currentStateMap = ImmutableMap.copyOf(curStateMap); + _currentStateMap = null; + + Map msgMap = new HashMap(); + for (String msgId : instanceMsgMap.keySet()) { + Message message = instanceMsgMap.get(msgId); + msgMap.put(new MsgId(msgId), new Msg(message)); + } + _messageMap = ImmutableMap.copyOf(msgMap); + + } + + /** + * Get the host name of the participant + * @return host name, or null if not applicable + */ + public String getHostName() { + return _hostName; + } + + /** + * Get the port of the participant + * @return port number, or -1 if not applicable + */ + public int getPort() { + return _port; + } + + /** + * Get if the participant is enabled + * @return true if enabled or false otherwise + */ + public boolean isEnabled() { + return _isEnabled; + } + + /** + * Get if the participant is alive + * @return true if running or false otherwise + */ + public boolean isAlive() { + return _runningInstance != null; + } + + /** + * Get the running instance + * @return running instance or null if not running + */ + public RunningInstance getRunningInstance() { + return _runningInstance; + } + + /** + * Get disabled partition id's + * @return set of disabled partition id's, or empty set if none + */ + public Set 
getDisablePartitionIds() { + return _disabledPartitionIds; + } + + /** + * Get tags + * @return set of tags + */ + public Set getTags() { + return _tags; + } + + /** + * Get message map + * @return message map + */ + public Map getMessageMap() { + return _messageMap; + } + + /** + * Get the current states of the resource + * @return map of resource-id to current state, or empty map if none + */ + public Map getCurrentStateMap() { + return _currentStateMap; + } +} diff --git a/helix-core/src/main/java/org/apache/helix/api/ParticipantAccessor.java b/helix-core/src/main/java/org/apache/helix/api/ParticipantAccessor.java new file mode 100644 index 0000000000..03e0992768 --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/ParticipantAccessor.java @@ -0,0 +1,98 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +import java.util.Map; +import java.util.Set; + +import org.apache.helix.HelixDataAccessor; + +public class ParticipantAccessor { + private final HelixDataAccessor _accessor; + + public ParticipantAccessor(HelixDataAccessor accessor) { + _accessor = accessor; + } + + /** + * + */ + public void disable(ParticipantId participantId) { + + } + + /** + * + */ + public void enable(ParticipantId participantId) { + + } + + /** + * @param msgs + */ + public void insertMsgs(ParticipantId participantId, SessionId sessionId, Map msgs) { + + } + + /** + * @param msgs + */ + public void updateMsgs(ParticipantId participantId, SessionId sessionId, Map msgs) { + + } + + /** + * @param msgIdSet + */ + public void deleteMsgs(ParticipantId participantId, SessionId sessionId, Set msgIdSet) { + + } + + /** + * @param disablePartitionSet + */ + public void disablePartitions(ParticipantId participantId, Set disablePartitionSet) { + + } + + /** + * @param enablePartitionSet + */ + public void enablePartitions(ParticipantId participantId, Set enablePartitionSet) { + + } + + /** + * create live instance for the participant + * @param participantId + */ + public void start(ParticipantId participantId) { + + } + + /** + * @param participantId + * @return + */ + public Participant read(ParticipantId participantId) { + return null; + } +} diff --git a/helix-core/src/main/java/org/apache/helix/api/ParticipantId.java b/helix-core/src/main/java/org/apache/helix/api/ParticipantId.java new file mode 100644 index 0000000000..cf8a88dc89 --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/ParticipantId.java @@ -0,0 +1,33 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +public class ParticipantId extends Id { + private final String _id; + + public ParticipantId(String id) { + _id = id; + } + + @Override + public String stringify() { + return _id; + } +} diff --git a/helix-core/src/main/java/org/apache/helix/api/Partition.java b/helix-core/src/main/java/org/apache/helix/api/Partition.java new file mode 100644 index 0000000000..c903493c23 --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/Partition.java @@ -0,0 +1,44 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +/** + * A partition of a resource + */ +public class Partition { + + private final PartitionId _id; + + /** + * Construct a partition + * @param id + */ + public Partition(PartitionId id) { + _id = id; + } + + /** + * Get partition id + * @return partition id + */ + public PartitionId getId() { + return _id; + } +} diff --git a/helix-core/src/main/java/org/apache/helix/api/PartitionId.java b/helix-core/src/main/java/org/apache/helix/api/PartitionId.java new file mode 100644 index 0000000000..3bec1ad165 --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/PartitionId.java @@ -0,0 +1,51 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +public class PartitionId extends Id { + private final ResourceId _resourceId; + private final String _partitionName; + + public PartitionId(ResourceId resourceId, String partitionName) { + _resourceId = resourceId; + _partitionName = partitionName; + } + + @Override + public String stringify() { + return String.format("%s_%s", _resourceId, _partitionName); + } + + /** + * @param partitionName + * @return + */ + public static String stripResourceId(String partitionName) { + return partitionName.substring(partitionName.lastIndexOf("_") + 1); + } + + /** + * @param partitionName + * @return + */ + public static ResourceId extracResourceId(String partitionName) { + return new ResourceId(partitionName.substring(0, partitionName.lastIndexOf("_"))); + } +} diff --git a/helix-core/src/main/java/org/apache/helix/api/ProcId.java b/helix-core/src/main/java/org/apache/helix/api/ProcId.java new file mode 100644 index 0000000000..106e5d61ab --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/ProcId.java @@ -0,0 +1,35 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +public class ProcId extends Id { + + private final String _id; + + public ProcId(String id) { + _id = id; + } + + @Override + public String stringify() { + return _id; + } + +} diff --git a/helix-core/src/main/java/org/apache/helix/api/RebalancerConfig.java b/helix-core/src/main/java/org/apache/helix/api/RebalancerConfig.java new file mode 100644 index 0000000000..219e86793d --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/RebalancerConfig.java @@ -0,0 +1,62 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +import org.apache.helix.model.IdealState.RebalanceMode; + +public class RebalancerConfig { + private final RebalanceMode _rebalancerMode; + private final RebalancerRef _rebalancerRef; + private final StateModelDefId _stateModelDefId; + + private final RscAssignment _resourceAssignment; + + public RebalancerConfig() { + _rebalancerMode = RebalanceMode.NONE; + _rebalancerRef = null; + _stateModelDefId = null; + + _resourceAssignment = null; + } + + /** + * Get the rebalancer mode + * @return rebalancer mode + */ + public RebalanceMode getRebalancerMode() { + return _rebalancerMode; + } + + /** + * Get the rebalancer class name + * @return rebalancer class name or null if not exist + */ + public RebalancerRef getRebalancerRef() { + return _rebalancerRef; + } + + /** + * Get state model definition name of the resource + * @return state model definition + */ + public StateModelDefId getStateModelDefId() { + return _stateModelDefId; + } +} diff --git a/helix-core/src/main/java/org/apache/helix/api/RebalancerRef.java b/helix-core/src/main/java/org/apache/helix/api/RebalancerRef.java new file mode 100644 index 0000000000..9011da979a --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/RebalancerRef.java @@ -0,0 +1,51 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import org.apache.helix.controller.rebalancer.Rebalancer; +import org.apache.helix.util.HelixUtil; + +public class RebalancerRef { + private final String _rebalancerClassName; + + public RebalancerRef(String rebalancerClassName) { + _rebalancerClassName = rebalancerClassName; + } + + /** + * @return + */ + public Rebalancer getRebalancer() { + try { + return (Rebalancer) (HelixUtil.loadClass(getClass(), _rebalancerClassName).newInstance()); + } catch (InstantiationException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } catch (IllegalAccessException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } catch (ClassNotFoundException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + return null; + } + +} diff --git a/helix-core/src/main/java/org/apache/helix/api/Resource.java b/helix-core/src/main/java/org/apache/helix/api/Resource.java new file mode 100644 index 0000000000..b76a0f8d2f --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/Resource.java @@ -0,0 +1,89 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import org.apache.helix.controller.rebalancer.Rebalancer; +import org.apache.helix.model.CurrentState; +import org.apache.helix.model.IdealState; +import org.apache.helix.model.IdealState.RebalanceMode; +import org.apache.helix.model.ResourceAssignment; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; + +/** + * Represent a resource entity in helix cluster + */ +public class Resource { + private final ResourceId _id; + private final RebalancerConfig _rebalancerConfig; + + private final Set _partitionSet; + + private final ExtView _externalView; + + // TODO move construct logic to ResourceAccessor + /** + * Construct a resource + * @param idealState + * @param currentStateMap map of participant-id to current state + */ + public Resource(ResourceId id, IdealState idealState, ResourceAssignment rscAssignment) { + _id = id; + // _rebalancerMode = idealState.getRebalanceMode(); + // _rebalancerRef = new RebalancerRef(idealState.getRebalancerClassName()); + // _stateModelDefId = new StateModelDefId(idealState.getStateModelDefRef()); + _rebalancerConfig = null; + + Set partitionSet = new HashSet(); + for (String partitionId : idealState.getPartitionSet()) { + partitionSet + .add(new Partition(new PartitionId(id, PartitionId.stripResourceId(partitionId)))); + } + _partitionSet = ImmutableSet.copyOf(partitionSet); + + // TODO + // _resourceAssignment = null; + + _externalView = null; + } + + /** + * Get the set of partitions of the resource + * @return set of partitions or empty set if none + */ + public Set getPartitionSet() { + return _partitionSet; + } + + /** + * Get the external view of the resource + * @return the external view of the resource + */ + public ExtView getExternalView() { + return 
_externalView; + } + +} diff --git a/helix-core/src/main/java/org/apache/helix/api/ResourceAccessor.java b/helix-core/src/main/java/org/apache/helix/api/ResourceAccessor.java new file mode 100644 index 0000000000..b5a6516fea --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/ResourceAccessor.java @@ -0,0 +1,45 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +import org.apache.helix.HelixDataAccessor; + +public class ResourceAccessor { + + private final HelixDataAccessor _accessor; + + public ResourceAccessor(HelixDataAccessor accessor) { + _accessor = accessor; + } + + /** + * + */ + public void setRresourceAssignment(ResourceId resourceId, RscAssignment resourceAssignment) { + + } + + /** + * + */ + public void setRebalancerConfig(RebalancerConfig config) { + + } +} diff --git a/helix-core/src/main/java/org/apache/helix/api/ResourceId.java b/helix-core/src/main/java/org/apache/helix/api/ResourceId.java new file mode 100644 index 0000000000..0b10290754 --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/ResourceId.java @@ -0,0 +1,33 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +public class ResourceId extends Id { + private final String _id; + + @Override + public String stringify() { + return _id; + } + + public ResourceId(String id) { + _id = id; + } +} diff --git a/helix-core/src/main/java/org/apache/helix/api/RscAssignment.java b/helix-core/src/main/java/org/apache/helix/api/RscAssignment.java new file mode 100644 index 0000000000..88e4ff6d94 --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/RscAssignment.java @@ -0,0 +1,44 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +import java.util.HashMap; +import java.util.Map; + +import org.apache.helix.model.ResourceAssignment; + +import com.google.common.collect.ImmutableMap; + +public class RscAssignment { + private final Map> _resourceAssignment; + + public RscAssignment(ResourceAssignment rscAssignment) { + Map> resourceAssignment = + new HashMap>(); + + // TODO fill the map + + _resourceAssignment = ImmutableMap.copyOf(resourceAssignment); + } + + public Map getParticipantStateMap(PartitionId partitionId) { + return _resourceAssignment.get(partitionId); + } +} diff --git a/helix-core/src/main/java/org/apache/helix/api/RunningInstance.java b/helix-core/src/main/java/org/apache/helix/api/RunningInstance.java new file mode 100644 index 0000000000..49c5ccfca1 --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/RunningInstance.java @@ -0,0 +1,67 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +/** + * A running attributes of a helix instance + */ +public class RunningInstance { + private final SessionId _sessionId; + private final HelixVersion _version; + private final ProcId _pid; + + /** + * Construct running instance + * @param sessionId zookeeper session-id + * @param version helix-version + * @param pid running jvm name + */ + public RunningInstance(SessionId sessionId, HelixVersion version, ProcId pid) { + _sessionId = sessionId; + _version = version; + _pid = pid; + } + + /** + * Get session id of the running instance + * session id is the zookeeper session id + * @return session id + */ + public SessionId getSessionId() { + return _sessionId; + } + + /** + * Get helix version of the running instance + * @return helix version + */ + public HelixVersion getVersion() { + return _version; + } + + /** + * Get the name of the running jvm of the running instance + * @return running jvm name (e.g. 1111@host) + */ + public ProcId getPid() { + return _pid; + } + +} diff --git a/helix-core/src/main/java/org/apache/helix/api/SessionId.java b/helix-core/src/main/java/org/apache/helix/api/SessionId.java new file mode 100644 index 0000000000..1bc1c32aa4 --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/SessionId.java @@ -0,0 +1,34 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +public class SessionId extends Id { + private final String _id; + + public SessionId(String id) { + _id = id; + } + + @Override + public String stringify() { + return _id; + } + +} diff --git a/helix-core/src/main/java/org/apache/helix/api/Spectator.java b/helix-core/src/main/java/org/apache/helix/api/Spectator.java new file mode 100644 index 0000000000..e25601b29d --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/Spectator.java @@ -0,0 +1,43 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +/** + * A cluster spectator that listen on cluster changes + */ +public class Spectator { + private final SpectatorId _id; + + /** + * Construct a spectator with id + * @param id + */ + public Spectator(SpectatorId id) { + _id = id; + } + + /** + * Spectator id + * @return spectator id + */ + public SpectatorId getId() { + return _id; + } +} diff --git a/helix-core/src/main/java/org/apache/helix/api/SpectatorId.java b/helix-core/src/main/java/org/apache/helix/api/SpectatorId.java new file mode 100644 index 0000000000..689cad6b1b --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/SpectatorId.java @@ -0,0 +1,34 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +public class SpectatorId extends Id { + private final String _id; + + public SpectatorId(String id) { + _id = id; + } + + @Override + public String stringify() { + return _id; + } + +} diff --git a/helix-core/src/main/java/org/apache/helix/api/State.java b/helix-core/src/main/java/org/apache/helix/api/State.java new file mode 100644 index 0000000000..b2000f2b0a --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/State.java @@ -0,0 +1,36 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/** + * + */ +public class State { + private final String _state; + + public State(String state) { + _state = state; + } + + @Override + public String toString() { + return _state; + } +} diff --git a/helix-core/src/main/java/org/apache/helix/api/StateModelDefId.java b/helix-core/src/main/java/org/apache/helix/api/StateModelDefId.java new file mode 100644 index 0000000000..0ac4cb91bd --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/api/StateModelDefId.java @@ -0,0 +1,34 @@ +package org.apache.helix.api; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +public class StateModelDefId extends Id { + private final String _id; + + public StateModelDefId(String id) { + _id = id; + } + + @Override + public String stringify() { + return _id; + } + +} From a019f3b26b799a0c4b2d0afc69d5c10a83ff1367 Mon Sep 17 00:00:00 2001 From: zzhang Date: Wed, 28 Aug 2013 13:24:10 -0700 Subject: [PATCH 007/113] [HELIX-225] fix helix-examples package build error, rb=13835 --- .../src/main/config/log4j.properties | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 helix-examples/src/main/config/log4j.properties diff --git a/helix-examples/src/main/config/log4j.properties b/helix-examples/src/main/config/log4j.properties new file mode 100644 index 0000000000..4b3dc31577 --- /dev/null +++ b/helix-examples/src/main/config/log4j.properties @@ -0,0 +1,31 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +# Set root logger level to DEBUG and its only appender to A1. +log4j.rootLogger=ERROR,A1 + +# A1 is set to be a ConsoleAppender. +log4j.appender.A1=org.apache.log4j.ConsoleAppender + +# A1 uses PatternLayout. +log4j.appender.A1.layout=org.apache.log4j.PatternLayout +log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n + +log4j.logger.org.I0Itec=ERROR +log4j.logger.org.apache=ERROR From 433b0011655c0c42228416488bb6b16f4b2f2700 Mon Sep 17 00:00:00 2001 From: zzhang Date: Wed, 28 Aug 2013 23:17:26 -0700 Subject: [PATCH 008/113] [HELIX-109] Review Helix model package, merge model changes with accessor changes, incomplete, rb=13878 --- .../resources/SchedulerTasksResource.java | 12 +- .../helix/tools/TestResetPartitionState.java | 6 +- .../apache/helix/agent/AgentStateModel.java | 8 +- .../java/org/apache/helix/PropertyKey.java | 18 ++ .../java/org/apache/helix/api/Cluster.java | 50 +--- .../org/apache/helix/api/ClusterAccessor.java | 202 ++++++++++++-- .../org/apache/helix/api/ClusterReader.java | 154 ---------- .../java/org/apache/helix/api/Controller.java | 4 +- .../apache/helix/api/ControllerAccessor.java | 3 +- .../java/org/apache/helix/api/CurState.java | 2 +- .../apache/helix/api/CurStateAccessor.java | 45 --- .../org/apache/helix/api/ExtViewAccessor.java | 45 --- .../org/apache/helix/api/HelixVersion.java | 14 + .../main/java/org/apache/helix/api/Id.java | 84 ++++++ .../helix/api/{MsgId.java => MessageId.java} | 4 +- .../main/java/org/apache/helix/api/Msg.java | 10 +- .../org/apache/helix/api/Participant.java | 206 +++++++++----- 
.../apache/helix/api/ParticipantAccessor.java | 263 ++++++++++++++++-- .../org/apache/helix/api/PartitionId.java | 2 +- .../apache/helix/api/RebalancerConfig.java | 77 ++++- .../org/apache/helix/api/RebalancerRef.java | 21 ++ .../java/org/apache/helix/api/Resource.java | 87 +++++- .../apache/helix/api/ResourceAccessor.java | 31 ++- .../org/apache/helix/api/RscAssignment.java | 98 ++++++- .../org/apache/helix/api/RunningInstance.java | 1 - .../main/java/org/apache/helix/api/State.java | 20 ++ .../controller/GenericHelixController.java | 2 +- .../controller/rebalancer/AutoRebalancer.java | 2 +- .../rebalancer/CustomRebalancer.java | 2 +- .../util/ConstraintBasedAssignment.java | 8 +- .../controller/stages/ClusterDataCache.java | 4 +- .../stages/CompatibilityCheckStage.java | 2 +- .../stages/CurrentStateComputationStage.java | 12 +- .../stages/ExternalViewComputeStage.java | 4 +- .../stages/MessageGenerationPhase.java | 6 +- .../stages/MessageSelectionStage.java | 14 +- .../stages/RebalanceIdealStateStage.java | 2 +- .../stages/ResourceComputationStage.java | 10 +- .../stages/TaskAssignmentStage.java | 10 +- .../helix/manager/zk/ControllerManager.java | 2 +- .../manager/zk/CurStateCarryOverUpdater.java | 2 +- ...efaultControllerMessageHandlerFactory.java | 8 +- ...ParticipantErrorMessageHandlerFactory.java | 4 +- ...DefaultSchedulerMessageHandlerFactory.java | 38 +-- .../zk/DistributedControllerManager.java | 2 +- .../manager/zk/DistributedLeaderElection.java | 2 +- .../manager/zk/ParticipantManagerHelper.java | 6 +- .../apache/helix/manager/zk/ZKHelixAdmin.java | 10 +- .../helix/manager/zk/ZKHelixDataAccessor.java | 2 +- .../helix/manager/zk/ZKHelixManager.java | 2 +- .../apache/helix/messaging/AsyncCallback.java | 2 +- .../messaging/DefaultMessagingService.java | 2 +- .../handling/AsyncCallbackService.java | 12 +- .../handling/HelixStateTransitionHandler.java | 24 +- .../helix/messaging/handling/HelixTask.java | 26 +- .../messaging/handling/HelixTaskExecutor.java | 
12 +- .../handling/MessageTimeoutTask.java | 2 +- .../helix/model/ClusterConstraints.java | 4 +- .../org/apache/helix/model/CurrentState.java | 61 +++- .../org/apache/helix/model/ExternalView.java | 45 ++- .../org/apache/helix/model/IdealState.java | 96 ++++++- .../apache/helix/model/InstanceConfig.java | 10 + .../org/apache/helix/model/LiveInstance.java | 45 ++- .../java/org/apache/helix/model/Message.java | 117 +++++++- .../helix/model/ResourceAssignment.java | 8 + .../helix/model/StateModelDefinition.java | 59 +++- .../org/apache/helix/model/Transition.java | 10 + .../model/builder/CurrentStateBuilder.java | 123 ++++++++ .../model/builder/IdealStateBuilder.java | 19 ++ .../monitoring/mbeans/ResourceMonitor.java | 2 +- .../DistClusterControllerElection.java | 2 +- .../participant/HelixStateMachineEngine.java | 6 +- .../helix/spectator/RoutingTableProvider.java | 2 +- .../org/apache/helix/tools/ZkLogAnalyzer.java | 6 +- .../org/apache/helix/util/RebalanceUtil.java | 8 +- .../apache/helix/util/StatusUpdateUtil.java | 26 +- .../java/org/apache/helix/ZkUnitTestBase.java | 4 +- .../stages/TestMsgSelectionStage.java | 2 +- .../stages/TestRebalancePipeline.java | 20 +- .../stages/TestResourceComputationStage.java | 2 +- .../helix/healthcheck/TestAddDropAlert.java | 4 +- .../helix/healthcheck/TestExpandAlert.java | 4 +- .../helix/healthcheck/TestSimpleAlert.java | 4 +- .../healthcheck/TestSimpleWildcardAlert.java | 4 +- .../helix/healthcheck/TestStalenessAlert.java | 4 +- .../helix/healthcheck/TestWildcardAlert.java | 4 +- .../helix/integration/TestAutoRebalance.java | 2 +- .../TestAutoRebalancePartitionLimit.java | 12 +- .../integration/TestCleanupExternalView.java | 4 +- .../TestCustomizedIdealStateRebalancer.java | 10 +- .../apache/helix/integration/TestDrop.java | 2 +- .../TestEnablePartitionDuringDisable.java | 4 +- .../integration/TestHelixInstanceTag.java | 4 +- .../TestMessagePartitionStateMismatch.java | 6 +- .../integration/TestMessagingService.java | 2 +- 
.../integration/TestResetPartitionState.java | 6 +- .../integration/TestSchedulerMessage.java | 18 +- .../TestStateTransitionTimeout.java | 8 +- .../helix/integration/TestStatusUpdate.java | 2 +- .../manager/zk/TestZkClusterManager.java | 10 +- .../helix/messaging/TestAsyncCallbackSvc.java | 10 +- .../handling/TestHelixTaskExecutor.java | 8 +- .../helix/mock/participant/ErrTransition.java | 4 +- .../apache/helix/tools/TestHelixAdminCli.java | 6 +- .../src/main/config/log4j.properties | 31 +++ .../MasterSlaveStateModelFactory.java | 16 +- .../org/apache/helix/examples/Quickstart.java | 2 +- .../helix/lockmanager/LockManagerDemo.java | 2 +- .../helix/filestore/FileStoreStateModel.java | 24 +- .../org/apache/helix/taskexecution/Task.java | 2 +- 110 files changed, 1889 insertions(+), 710 deletions(-) delete mode 100644 helix-core/src/main/java/org/apache/helix/api/ClusterReader.java delete mode 100644 helix-core/src/main/java/org/apache/helix/api/CurStateAccessor.java delete mode 100644 helix-core/src/main/java/org/apache/helix/api/ExtViewAccessor.java rename helix-core/src/main/java/org/apache/helix/api/{MsgId.java => MessageId.java} (93%) create mode 100644 helix-core/src/main/java/org/apache/helix/model/builder/CurrentStateBuilder.java create mode 100644 helix-examples/src/main/config/log4j.properties diff --git a/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/SchedulerTasksResource.java b/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/SchedulerTasksResource.java index 59d9174ad8..2e48de4e2c 100644 --- a/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/SchedulerTasksResource.java +++ b/helix-admin-webapp/src/main/java/org/apache/helix/webapp/resources/SchedulerTasksResource.java @@ -117,7 +117,7 @@ StringRepresentation getSchedulerTasksRepresentation() throws JsonGenerationExce ClusterRepresentationUtil.getClusterDataAccessor(zkClient, clusterName); LiveInstance liveInstance = 
accessor.getProperty(accessor.keyBuilder().liveInstance(instanceName)); - String sessionId = liveInstance.getSessionId(); + String sessionId = liveInstance.getSessionIdString(); StringRepresentation representation = new StringRepresentation("");// (ClusterRepresentationUtil.ObjectToJson(instanceConfigs), // MediaType.APPLICATION_JSON); @@ -158,7 +158,7 @@ public void acceptRepresentation(Representation entity) { schedulerMessage.getRecord().getMapFields().put(MESSAGETEMPLATE, messageTemplate); - schedulerMessage.setTgtSessionId(leader.getSessionId()); + schedulerMessage.setTgtSessionId(leader.getSessionIdString()); schedulerMessage.setTgtName("CONTROLLER"); schedulerMessage.setSrcInstanceType(InstanceType.CONTROLLER); String taskQueueName = @@ -167,22 +167,22 @@ public void acceptRepresentation(Representation entity) { schedulerMessage.getRecord().setSimpleField( DefaultSchedulerMessageHandlerFactory.SCHEDULER_TASK_QUEUE, taskQueueName); } - accessor.setProperty(accessor.keyBuilder().controllerMessage(schedulerMessage.getMsgId()), + accessor.setProperty(accessor.keyBuilder().controllerMessage(schedulerMessage.getMsgIdString()), schedulerMessage); Map resultMap = new HashMap(); resultMap.put("StatusUpdatePath", PropertyPathConfig.getPath( PropertyType.STATUSUPDATES_CONTROLLER, clusterName, MessageType.SCHEDULER_MSG.toString(), - schedulerMessage.getMsgId())); + schedulerMessage.getMsgIdString())); resultMap.put("MessageType", Message.MessageType.SCHEDULER_MSG.toString()); - resultMap.put("MsgId", schedulerMessage.getMsgId()); + resultMap.put("MsgId", schedulerMessage.getMsgIdString()); // Assemble the rest URL for task status update String ipAddress = InetAddress.getLocalHost().getCanonicalHostName(); String url = "http://" + ipAddress + ":" + getContext().getAttributes().get(RestAdminApplication.PORT) + "/clusters/" + clusterName + "/Controller/statusUpdates/SCHEDULER_MSG/" - + schedulerMessage.getMsgId(); + + schedulerMessage.getMsgIdString(); 
resultMap.put("statusUpdateUrl", url); getResponse().setEntity(ClusterRepresentationUtil.ObjectToJson(resultMap), diff --git a/helix-admin-webapp/src/test/java/org/apache/helix/tools/TestResetPartitionState.java b/helix-admin-webapp/src/test/java/org/apache/helix/tools/TestResetPartitionState.java index c8099a4e76..c3980bf7ed 100644 --- a/helix-admin-webapp/src/test/java/org/apache/helix/tools/TestResetPartitionState.java +++ b/helix-admin-webapp/src/test/java/org/apache/helix/tools/TestResetPartitionState.java @@ -67,8 +67,8 @@ public ErrTransitionWithResetCnt(Map> errPartitions) { @Override public void doTransition(Message message, NotificationContext context) { super.doTransition(message, context); - String fromState = message.getFromState(); - String toState = message.getToState(); + String fromState = message.getFromStateString(); + String toState = message.getToStateString(); if (fromState.equals("ERROR") && toState.equals("OFFLINE")) { // System.err.println("doReset() invoked"); _errToOfflineInvoked.incrementAndGet(); @@ -191,7 +191,7 @@ private void clearStatusUpdate(String clusterName, String instance, String resou Builder keyBuilder = accessor.keyBuilder(); LiveInstance liveInstance = accessor.getProperty(keyBuilder.liveInstance(instance)); - accessor.removeProperty(keyBuilder.stateTransitionStatus(instance, liveInstance.getSessionId(), + accessor.removeProperty(keyBuilder.stateTransitionStatus(instance, liveInstance.getSessionIdString(), resource, partition)); } diff --git a/helix-agent/src/main/java/org/apache/helix/agent/AgentStateModel.java b/helix-agent/src/main/java/org/apache/helix/agent/AgentStateModel.java index 313f43069f..393da23efa 100644 --- a/helix-agent/src/main/java/org/apache/helix/agent/AgentStateModel.java +++ b/helix-agent/src/main/java/org/apache/helix/agent/AgentStateModel.java @@ -71,8 +71,8 @@ public void genericStateTransitionHandler(Message message, NotificationContext c HelixManager manager = context.getManager(); String 
clusterName = manager.getClusterName(); - String fromState = message.getFromState(); - String toState = message.getToState(); + String fromState = message.getFromStateString(); + String toState = message.getToStateString(); // construct keys for command-config String cmdKey = buildKey(fromState, toState, CommandAttribute.COMMAND); @@ -112,8 +112,8 @@ public void genericStateTransitionHandler(Message message, NotificationContext c } if (cmd == null) { - throw new Exception("Unable to find command for transition from:" + message.getFromState() - + " to:" + message.getToState()); + throw new Exception("Unable to find command for transition from:" + message.getFromStateString() + + " to:" + message.getToStateString()); } _logger.info("Executing command: " + cmd + ", using workingDir: " + workingDir + ", timeout: " + timeout + ", on " + manager.getInstanceName()); diff --git a/helix-core/src/main/java/org/apache/helix/PropertyKey.java b/helix-core/src/main/java/org/apache/helix/PropertyKey.java index 0874958fad..2f2031963c 100644 --- a/helix-core/src/main/java/org/apache/helix/PropertyKey.java +++ b/helix-core/src/main/java/org/apache/helix/PropertyKey.java @@ -304,6 +304,14 @@ public PropertyKey instances() { return new PropertyKey(PropertyType.INSTANCES, null, _clusterName); } + /** + * Get a property key associated with all instances + * @return {@link PropertyKey} + */ + public PropertyKey instance(String instanceName) { + return new PropertyKey(PropertyType.INSTANCES, null, _clusterName, instanceName); + } + /** * Get a property key associated with {@link Message} for an instance * @param instanceName @@ -332,6 +340,16 @@ public PropertyKey sessions(String instanceName) { return new PropertyKey(CURRENTSTATES, CurrentState.class, _clusterName, instanceName); } + /** + * Get a property key associated with {@link CurrentState} of an instance and session + * @param instanceName + * @param sessionId + * @return {@link PropertyKey} + */ + public PropertyKey 
currentStates(String instanceName) { + return new PropertyKey(CURRENTSTATES, CurrentState.class, _clusterName, instanceName); + } + /** * Get a property key associated with {@link CurrentState} of an instance and session * @param instanceName diff --git a/helix-core/src/main/java/org/apache/helix/api/Cluster.java b/helix-core/src/main/java/org/apache/helix/api/Cluster.java index 5b149d954e..07deca601c 100644 --- a/helix-core/src/main/java/org/apache/helix/api/Cluster.java +++ b/helix-core/src/main/java/org/apache/helix/api/Cluster.java @@ -61,53 +61,25 @@ public class Cluster { private final ClusterConfig _config = null; - // TODO move construct logic to ClusterAccessor /** - * Construct a cluster - * @param id a unique id for the cluster - * @param idealStateMap map of resource-id to ideal-state - * @param currentStateMap map of resource-id to map of participant-id to current-state - * @param instanceConfigMap map of participant-id to instance-config - * @param liveInstanceMap map of participant-id to live-instance - * @param msgMap map of participant-id to map of message-id to message - * @param leader + * construct a cluster + * @param id + * @param resourceMap + * @param participantMap + * @param controllerMap + * @param leaderId */ - public Cluster(ClusterId id, Map idealStateMap, - Map> currentStateMap, - Map instanceConfigMap, Map liveInstanceMap, - Map> msgMap, LiveInstance leader) { - _id = id; + public Cluster(ClusterId id, Map resourceMap, + Map participantMap, Map controllerMap, + ControllerId leaderId) { - Map resourceMap = new HashMap(); - for (String resourceId : idealStateMap.keySet()) { - IdealState idealState = idealStateMap.get(resourceId); - Map curStateMap = currentStateMap.get(resourceId); + _id = id; - // TODO pass resource assignment - resourceMap.put(new ResourceId(resourceId), new Resource(new ResourceId(resourceId), - idealState, null)); - } _resourceMap = ImmutableMap.copyOf(resourceMap); - Map participantMap = new HashMap(); - for 
(String participantId : instanceConfigMap.keySet()) { - InstanceConfig instanceConfig = instanceConfigMap.get(participantId); - LiveInstance liveInstance = liveInstanceMap.get(participantId); - Map instanceMsgMap = msgMap.get(participantId); - - // TODO pass current-state map - participantMap.put(new ParticipantId(participantId), new Participant(new ParticipantId( - participantId), instanceConfig, liveInstance, null, instanceMsgMap)); - } _participantMap = ImmutableMap.copyOf(participantMap); - Map controllerMap = new HashMap(); - if (leader != null) { - _leaderId = new ControllerId(leader.getId()); - controllerMap.put(_leaderId, new Controller(_leaderId, leader, true)); - } else { - _leaderId = null; - } + _leaderId = leaderId; // TODO impl this when we persist controllers and spectators on zookeeper _controllerMap = ImmutableMap.copyOf(controllerMap); diff --git a/helix-core/src/main/java/org/apache/helix/api/ClusterAccessor.java b/helix-core/src/main/java/org/apache/helix/api/ClusterAccessor.java index 1b43e6b03b..ac3b7153f2 100644 --- a/helix-core/src/main/java/org/apache/helix/api/ClusterAccessor.java +++ b/helix-core/src/main/java/org/apache/helix/api/ClusterAccessor.java @@ -19,68 +19,234 @@ * under the License. 
*/ +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + import org.apache.helix.HelixDataAccessor; +import org.apache.helix.HelixException; +import org.apache.helix.PropertyKey; +import org.apache.helix.model.CurrentState; +import org.apache.helix.model.IdealState; +import org.apache.helix.model.InstanceConfig; +import org.apache.helix.model.LiveInstance; +import org.apache.helix.model.Message; +import org.apache.helix.model.PauseSignal; public class ClusterAccessor { private final HelixDataAccessor _accessor; + private final PropertyKey.Builder _keyBuilder; private final ClusterId _clusterId; - public ClusterAccessor(HelixDataAccessor accessor) { + public ClusterAccessor(ClusterId clusterId, HelixDataAccessor accessor) { _accessor = accessor; - _clusterId = null; + _keyBuilder = accessor.keyBuilder(); + _clusterId = clusterId; } /** - * + * create a new cluster */ - public void create() { - + public void createCluster() { + List createKeys = new ArrayList(); + + createKeys.add(_keyBuilder.idealStates()); + createKeys.add(_keyBuilder.clusterConfigs()); + createKeys.add(_keyBuilder.instanceConfigs()); + createKeys.add(_keyBuilder.resourceConfigs()); + createKeys.add(_keyBuilder.instances()); + createKeys.add(_keyBuilder.liveInstances()); + createKeys.add(_keyBuilder.externalViews()); + createKeys.add(_keyBuilder.controller()); + + // TODO add controller sub-dir's and state model definitions + for (PropertyKey key : createKeys) { + _accessor.createProperty(key, null); + } } /** - * @return + * read entire cluster data + * @return cluster */ - public Cluster read() { - return null; + public Cluster readCluster() { + /** + * map of instance-id to instance-config + */ + Map instanceConfigMap = + _accessor.getChildValuesMap(_keyBuilder.instanceConfigs()); + + /** + * map of resource-id to ideal-state + */ + Map idealStateMap = _accessor.getChildValuesMap(_keyBuilder.idealStates()); + + /** + * map of instance-id to 
live-instance + */ + Map liveInstanceMap = + _accessor.getChildValuesMap(_keyBuilder.liveInstances()); + + /** + * map of participant-id to map of message-id to message + */ + Map> messageMap = new HashMap>(); + for (String instanceName : liveInstanceMap.keySet()) { + Map instanceMsgMap = + _accessor.getChildValuesMap(_keyBuilder.messages(instanceName)); + messageMap.put(instanceName, instanceMsgMap); + } + + /** + * map of participant-id to map of resource-id to current-state + */ + Map> currentStateMap = + new HashMap>(); + for (String participantId : liveInstanceMap.keySet()) { + LiveInstance liveInstance = liveInstanceMap.get(participantId); + SessionId sessionId = liveInstance.getSessionId(); + Map instanceCurStateMap = + _accessor.getChildValuesMap(_keyBuilder.currentStates(participantId, + sessionId.stringify())); + + currentStateMap.put(participantId, instanceCurStateMap); + } + + LiveInstance leader = _accessor.getProperty(_keyBuilder.controllerLeader()); + + Map resourceMap = new HashMap(); + for (String resourceName : idealStateMap.keySet()) { + IdealState idealState = idealStateMap.get(resourceName); + + // TODO pass resource assignment + ResourceId resourceId = new ResourceId(resourceName); + resourceMap.put(resourceId, new Resource(resourceId, idealState, null)); + } + + Map participantMap = new HashMap(); + for (String participantName : instanceConfigMap.keySet()) { + InstanceConfig instanceConfig = instanceConfigMap.get(participantName); + LiveInstance liveInstance = liveInstanceMap.get(participantName); + Map instanceMsgMap = messageMap.get(participantName); + + // TODO pass current-state map + ParticipantId participantId = new ParticipantId(participantName); + + // TODO construct participant + participantMap.put(participantId, new Participant(participantId, null, -1, false, null, null, + null, null, null)); + } + + Map controllerMap = new HashMap(); + ControllerId leaderId = null; + if (leader != null) { + leaderId = new 
ControllerId(leader.getId()); + controllerMap.put(leaderId, new Controller(leaderId, leader, true)); + } + + return new Cluster(_clusterId, resourceMap, participantMap, controllerMap, leaderId); } /** + * pause controller of cluster */ - public void pause() { - + public void pauseCluster() { + _accessor.createProperty(_keyBuilder.pause(), new PauseSignal("pause")); } /** + * resume controller of cluster */ public void resume() { - + _accessor.removeProperty(_keyBuilder.pause()); } /** + * add a resource to cluster * @param resource */ - public void addResource(Resource resource) { - + public void addResourceToCluster(Resource resource) { + StateModelDefId stateModelDefId = resource.getRebalancerConfig().getStateModelDefId(); + if (_accessor.getProperty(_keyBuilder.stateModelDef(stateModelDefId.stringify())) == null) { + throw new HelixException("State model: " + stateModelDefId + " not found in cluster: " + + _clusterId); + } + + ResourceId resourceId = resource.getId(); + if (_accessor.getProperty(_keyBuilder.idealStates(resourceId.stringify())) != null) { + throw new HelixException("Skip adding resource: " + resourceId + + " . 
Resource ideal state already exists in cluster: " + _clusterId); + } + + // TODO convert rebalancerConfig to idealState + _accessor.createProperty(_keyBuilder.idealStates(resourceId.stringify()), null); } /** + * drop a resource from cluster * @param resourceId */ - public void dropResource(ResourceId resourceId) { + public void dropResourceFromCluster(ResourceId resourceId) { + // TODO check existence + _accessor.removeProperty(_keyBuilder.idealStates(resourceId.stringify())); + _accessor.removeProperty(_keyBuilder.resourceConfig(resourceId.stringify())); + } + /** + * check if cluster structure is valid + * @return true if valid or false otherwise + */ + public boolean isClusterStructureValid() { + // TODO impl this + return false; } /** + * add a participant to cluster * @param participant */ - public void addParticipant(Participant participant) { - + public void addParticipantToCluster(Participant participant) { + if (!isClusterStructureValid()) { + throw new HelixException("Cluster: " + _clusterId + " structure is not valid"); + } + + ParticipantId participantId = participant.getId(); + if (_accessor.getProperty(_keyBuilder.instanceConfig(participantId.stringify())) != null) { + throw new HelixException("Config for participant: " + participantId + + " already exists in cluster: " + _clusterId); + } + + List createKeys = new ArrayList(); + createKeys.add(_keyBuilder.instanceConfig(participantId.stringify())); + createKeys.add(_keyBuilder.messages(participantId.stringify())); + createKeys.add(_keyBuilder.currentStates(participantId.stringify())); + // TODO add participant error and status-update paths + + for (PropertyKey key : createKeys) { + _accessor.createProperty(key, null); + } } /** + * drop a participant from cluster * @param participantId */ - public void dropParticipant(ParticipantId participantId) { - + public void dropParticipantFromCluster(ParticipantId participantId) { + if 
(_accessor.getProperty(_keyBuilder.instanceConfig(participantId.stringify())) == null) { + throw new HelixException("Config for participant: " + participantId + + " does NOT exist in cluster: " + _clusterId); + } + + if (_accessor.getProperty(_keyBuilder.instance(participantId.stringify())) == null) { + throw new HelixException("Participant: " + participantId + + " structure does NOT exist in cluster: " + _clusterId); + } + + // delete participant config path + _accessor.removeProperty(_keyBuilder.instanceConfig(participantId.stringify())); + + // delete participant path + _accessor.removeProperty(_keyBuilder.instance(participantId.stringify())); } } diff --git a/helix-core/src/main/java/org/apache/helix/api/ClusterReader.java b/helix-core/src/main/java/org/apache/helix/api/ClusterReader.java deleted file mode 100644 index 12a41ac8f0..0000000000 --- a/helix-core/src/main/java/org/apache/helix/api/ClusterReader.java +++ /dev/null @@ -1,154 +0,0 @@ -package org.apache.helix.api; - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -import java.util.HashMap; -import java.util.Map; - -import org.apache.helix.HelixDataAccessor; -import org.apache.helix.PropertyKey; -import org.apache.helix.ZNRecord; -import org.apache.helix.manager.zk.ZKHelixDataAccessor; -import org.apache.helix.manager.zk.ZNRecordSerializer; -import org.apache.helix.manager.zk.ZkBaseDataAccessor; -import org.apache.helix.manager.zk.ZkClient; -import org.apache.helix.model.CurrentState; -import org.apache.helix.model.IdealState; -import org.apache.helix.model.InstanceConfig; -import org.apache.helix.model.LiveInstance; -import org.apache.helix.model.Message; - -/** - * Read a cluster from zookeeper - */ -public class ClusterReader { - final ZkClient _client; - - public ClusterReader(ZkClient client) { - _client = client; - } - - // TODO move to ClusterAccessor - /** - * Read the following znodes from zookeeper and construct a cluster instance - * - all instance-configs - * - all ideal-states - * - all live-instances - * - all messages - * - all current-states - * @param clusterId - * @return cluster or null if not exist - */ - public Cluster readCluster(String clusterId) { - HelixDataAccessor accessor = - new ZKHelixDataAccessor(clusterId, new ZkBaseDataAccessor(_client)); - PropertyKey.Builder keyBuilder = accessor.keyBuilder(); - - /** - * map of instance-id to instance-config - */ - Map instanceConfigMap = - accessor.getChildValuesMap(keyBuilder.instanceConfigs()); - - /** - * map of resource-id to ideal-state - */ - Map idealStateMap = accessor.getChildValuesMap(keyBuilder.idealStates()); - - /** - * map of instance-id to live-instance - */ - Map liveInstanceMap = - accessor.getChildValuesMap(keyBuilder.liveInstances()); - - /** - * map of participant-id to map of message-id to message - */ - Map> messageMap = new HashMap>(); - for (String instanceName : liveInstanceMap.keySet()) { - Map instanceMsgMap = - accessor.getChildValuesMap(keyBuilder.messages(instanceName)); - messageMap.put(instanceName, instanceMsgMap); 
- } - - /** - * map of resource-id to map of participant-id to current-state - */ - Map> currentStateMap = - new HashMap>(); - for (String participantId : liveInstanceMap.keySet()) { - LiveInstance liveInstance = liveInstanceMap.get(participantId); - String sessionId = liveInstance.getSessionId(); - Map instanceCurStateMap = - accessor.getChildValuesMap(keyBuilder.currentStates(participantId, sessionId)); - - for (String resourceId : instanceCurStateMap.keySet()) { - if (!currentStateMap.containsKey(resourceId)) { - currentStateMap.put(resourceId, new HashMap()); - } - - currentStateMap.get(resourceId).put(participantId, instanceCurStateMap.get(resourceId)); - } - } - - return new Cluster(new ClusterId(clusterId), idealStateMap, currentStateMap, instanceConfigMap, - liveInstanceMap, messageMap, null); - } - - /** - * simple test - * @param args - */ - public static void main(String[] args) { - ZkClient client = - new ZkClient("zzhang-ld", ZkClient.DEFAULT_SESSION_TIMEOUT, - ZkClient.DEFAULT_CONNECTION_TIMEOUT, new ZNRecordSerializer()); - - ClusterReader reader = new ClusterReader(client); - Cluster cluster = reader.readCluster("ESPRESSO_STORAGE"); - - Map participantMap = cluster.getParticipantMap(); - for (ParticipantId participantId : participantMap.keySet()) { - Participant participant = participantMap.get(participantId); - System.out.println(participantId + " - " + participant.isEnabled()); - if (participant.isAlive()) { - System.out.println("\t" + participant.getRunningInstance().getSessionId()); - } - } - - Map resourceMap = cluster.getResourceMap(); - for (ResourceId resourceId : resourceMap.keySet()) { - Resource resource = resourceMap.get(resourceId); - // System.out.println(resourceId + " - " + resource.getStateModelDefId()); - - // TODO fix it - // - // Map curStateMap = resource.getCurrentStateMap(); - // for (ParticipantId participantId : curStateMap.keySet()) { - // System.out.println("\t" + participantId); - // CurState curState = 
curStateMap.get(participantId); - // for (PartitionId partitionId : curState.getPartitionIdSet()) { - // State state = curState.getState(partitionId); - // System.out.println("\t\t" + partitionId + " - " + state); - // } - // } - } - } -} diff --git a/helix-core/src/main/java/org/apache/helix/api/Controller.java b/helix-core/src/main/java/org/apache/helix/api/Controller.java index df28571a0a..d056e13720 100644 --- a/helix-core/src/main/java/org/apache/helix/api/Controller.java +++ b/helix-core/src/main/java/org/apache/helix/api/Controller.java @@ -38,8 +38,8 @@ public Controller(ControllerId id, LiveInstance liveInstance, boolean isLeader) if (liveInstance != null) { _runningInstance = - new RunningInstance(new SessionId(liveInstance.getSessionId()), new HelixVersion( - liveInstance.getHelixVersion()), new ProcId(liveInstance.getLiveInstance())); + new RunningInstance(new SessionId(liveInstance.getSessionIdString()), new HelixVersion( + liveInstance.getHelixVersionString()), new ProcId(liveInstance.getLiveInstance())); } else { _runningInstance = null; } diff --git a/helix-core/src/main/java/org/apache/helix/api/ControllerAccessor.java b/helix-core/src/main/java/org/apache/helix/api/ControllerAccessor.java index fb3f8446a3..3a7b3b6e0f 100644 --- a/helix-core/src/main/java/org/apache/helix/api/ControllerAccessor.java +++ b/helix-core/src/main/java/org/apache/helix/api/ControllerAccessor.java @@ -29,9 +29,10 @@ public ControllerAccessor(HelixDataAccessor accessor) { } /** + * create leader * @param controllerId */ public void start(ControllerId controllerId) { - + // TODO impl this } } diff --git a/helix-core/src/main/java/org/apache/helix/api/CurState.java b/helix-core/src/main/java/org/apache/helix/api/CurState.java index e66fb7a346..b6f64a9dc5 100644 --- a/helix-core/src/main/java/org/apache/helix/api/CurState.java +++ b/helix-core/src/main/java/org/apache/helix/api/CurState.java @@ -50,7 +50,7 @@ public CurState(ResourceId resourceId, ParticipantId participantId, 
CurrentState _participantId = participantId; Map stateMap = new HashMap(); - Map currentStateMap = currentState.getPartitionStateMap(); + Map currentStateMap = currentState.getPartitionStateStringMap(); for (String partitionId : currentStateMap.keySet()) { String state = currentStateMap.get(partitionId); stateMap.put(new PartitionId(resourceId, PartitionId.stripResourceId(partitionId)), diff --git a/helix-core/src/main/java/org/apache/helix/api/CurStateAccessor.java b/helix-core/src/main/java/org/apache/helix/api/CurStateAccessor.java deleted file mode 100644 index 1b8e2ce497..0000000000 --- a/helix-core/src/main/java/org/apache/helix/api/CurStateAccessor.java +++ /dev/null @@ -1,45 +0,0 @@ -package org.apache.helix.api; - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -import org.apache.helix.HelixDataAccessor; - -public class CurStateAccessor { - private final HelixDataAccessor _accessor; - - public CurStateAccessor(HelixDataAccessor accessor) { - _accessor = accessor; - } - - /** - * @param curStateUpdate current state change delta - */ - public void updateCurState(ParticipantId participantId, ResourceId resourceId, - CurState curStateUpdate) { - // accessor.updateProperty() - } - - /** - * - */ - public void drop(ParticipantId participantId, ResourceId resourceId) { - - } -} diff --git a/helix-core/src/main/java/org/apache/helix/api/ExtViewAccessor.java b/helix-core/src/main/java/org/apache/helix/api/ExtViewAccessor.java deleted file mode 100644 index 41406ff20a..0000000000 --- a/helix-core/src/main/java/org/apache/helix/api/ExtViewAccessor.java +++ /dev/null @@ -1,45 +0,0 @@ -package org.apache.helix.api; - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -import org.apache.helix.HelixDataAccessor; - -public class ExtViewAccessor { - - private final HelixDataAccessor _accessor; - - public ExtViewAccessor(HelixDataAccessor accessor) { - _accessor = accessor; - } - - /** - * @param extView - */ - public void setExternalView(ExtView extView) { - - } - - /** - * - */ - public void drop(ResourceId resourceId) { - - } -} diff --git a/helix-core/src/main/java/org/apache/helix/api/HelixVersion.java b/helix-core/src/main/java/org/apache/helix/api/HelixVersion.java index 84d0a8f76b..a32c9c7f15 100644 --- a/helix-core/src/main/java/org/apache/helix/api/HelixVersion.java +++ b/helix-core/src/main/java/org/apache/helix/api/HelixVersion.java @@ -48,4 +48,18 @@ public String getMajor() { public String getMinor() { return null; } + + @Override + public String toString() { + return _version; + } + + /** + * Create a version from a version string + * @param version string in the form of a.b.c.d + * @return HelixVersion + */ + public static HelixVersion from(String version) { + return new HelixVersion(version); + } } diff --git a/helix-core/src/main/java/org/apache/helix/api/Id.java b/helix-core/src/main/java/org/apache/helix/api/Id.java index 96ce15ddb1..d31084a43a 100644 --- a/helix-core/src/main/java/org/apache/helix/api/Id.java +++ b/helix-core/src/main/java/org/apache/helix/api/Id.java @@ -51,4 +51,88 @@ public int compareTo(Id that) { return -1; } + /** + * Get a concrete resource id for a string name + * @param resourceId string resource identifier + * @return ResourceId + */ + public static ResourceId resource(String resourceId) { + if (resourceId == null) { + return null; + } + return new ResourceId(resourceId); + } + + /** + * Get a concrete partition id + * @param partitionId string partition identifier + * @return PartitionId + */ + public static PartitionId partition(String partitionId) { + if (partitionId == null) { + return null; + } + return new PartitionId(PartitionId.extractResourceId(partitionId), + 
PartitionId.stripResourceId(partitionId)); + } + + /** + * Get a concrete participant id + * @param participantId string participant identifier + * @return ParticipantId + */ + public static ParticipantId participant(String participantId) { + if (participantId == null) { + return null; + } + return new ParticipantId(participantId); + } + + /** + * Get a concrete session id + * @param sessionId string session identifier + * @return SessionId + */ + public static SessionId session(String sessionId) { + if (sessionId == null) { + return null; + } + return new SessionId(sessionId); + } + + /** + * Get a concrete process id + * @param procId string process identifier (e.g. pid@host) + * @return ProcId + */ + public static ProcId process(String processId) { + if (processId == null) { + return null; + } + return new ProcId(processId); + } + + /** + * Get a concrete state model definition id + * @param stateModelDefId string state model identifier + * @return StateModelDefId + */ + public static StateModelDefId stateModelDef(String stateModelDefId) { + if (stateModelDefId == null) { + return null; + } + return new StateModelDefId(stateModelDefId); + } + + /** + * Get a concrete message id + * @param messageId string message identifier + * @return MsgId + */ + public static MessageId message(String messageId) { + if (messageId == null) { + return null; + } + return new MessageId(messageId); + } } diff --git a/helix-core/src/main/java/org/apache/helix/api/MsgId.java b/helix-core/src/main/java/org/apache/helix/api/MessageId.java similarity index 93% rename from helix-core/src/main/java/org/apache/helix/api/MsgId.java rename to helix-core/src/main/java/org/apache/helix/api/MessageId.java index 88eb4480b2..5d271c8cb0 100644 --- a/helix-core/src/main/java/org/apache/helix/api/MsgId.java +++ b/helix-core/src/main/java/org/apache/helix/api/MessageId.java @@ -19,11 +19,11 @@ * under the License. 
*/ -public class MsgId extends Id { +public class MessageId extends Id { private final String _id; - public MsgId(String id) { + public MessageId(String id) { _id = id; } diff --git a/helix-core/src/main/java/org/apache/helix/api/Msg.java b/helix-core/src/main/java/org/apache/helix/api/Msg.java index ac7638d3fa..229d74232f 100644 --- a/helix-core/src/main/java/org/apache/helix/api/Msg.java +++ b/helix-core/src/main/java/org/apache/helix/api/Msg.java @@ -25,7 +25,7 @@ * Helix message */ public class Msg { - private final MsgId _id; + private final MessageId _id; private final SessionId _srcSessionId; private final SessionId _tgtSessionid; @@ -36,16 +36,16 @@ public class Msg { * @param message */ public Msg(Message message) { - _id = new MsgId(message.getId()); - _srcSessionId = new SessionId(message.getSrcSessionId()); - _tgtSessionid = new SessionId(message.getTgtSessionId()); + _id = new MessageId(message.getId()); + _srcSessionId = new SessionId(message.getSrcSessionIdString()); + _tgtSessionid = new SessionId(message.getTgtSessionIdString()); } /** * Get message id * @return message id */ - public MsgId getId() { + public MessageId getId() { return _id; } diff --git a/helix-core/src/main/java/org/apache/helix/api/Participant.java b/helix-core/src/main/java/org/apache/helix/api/Participant.java index e3eb68e73b..f7a9ed0b3a 100644 --- a/helix-core/src/main/java/org/apache/helix/api/Participant.java +++ b/helix-core/src/main/java/org/apache/helix/api/Participant.java @@ -46,7 +46,7 @@ public class Participant { /** * set of disabled partition id's */ - private final Set _disabledPartitionIds; + private final Set _disabledPartitionIdSet; private final Set _tags; private final RunningInstance _runningInstance; @@ -54,81 +54,29 @@ public class Participant { /** * map of resource-id to current-state */ - private final Map _currentStateMap; + private final Map _currentStateMap; /** * map of message-id to message */ - private final Map _messageMap; + private final Map 
_messageMap; - // TODO move this to ParticipantAccessor /** * Construct a participant * @param config */ - public Participant(ParticipantId id, InstanceConfig config, LiveInstance liveInstance, - Map currentStateMap, Map instanceMsgMap) { + public Participant(ParticipantId id, String hostName, int port, boolean isEnabled, + Set disabledPartitionIdSet, Set tags, RunningInstance runningInstance, + Map currentStateMap, Map messageMap) { _id = id; - _hostName = config.getHostName(); - - int port = -1; - try { - port = Integer.parseInt(config.getPort()); - } catch (IllegalArgumentException e) { - // keep as -1 - } - if (port < 0 || port > 65535) { - port = -1; - } + _hostName = hostName; _port = port; - _isEnabled = config.getInstanceEnabled(); - - List disabledPartitions = config.getDisabledPartitions(); - if (disabledPartitions == null) { - _disabledPartitionIds = Collections.emptySet(); - } else { - Set disabledPartitionSet = new HashSet(); - for (String partitionId : disabledPartitions) { - disabledPartitionSet.add(new PartitionId(PartitionId.extracResourceId(partitionId), - PartitionId.stripResourceId(partitionId))); - } - _disabledPartitionIds = ImmutableSet.copyOf(disabledPartitionSet); - } - - List tags = config.getTags(); - if (tags == null) { - _tags = Collections.emptySet(); - } else { - _tags = ImmutableSet.copyOf(config.getTags()); - } - - if (liveInstance != null) { - _runningInstance = - new RunningInstance(new SessionId(liveInstance.getSessionId()), new HelixVersion( - liveInstance.getHelixVersion()), new ProcId(liveInstance.getLiveInstance())); - } else { - _runningInstance = null; - } - - // TODO set curstate - // Map curStateMap = new HashMap(); - // if (currentStateMap != null) { - // for (String participantId : currentStateMap.keySet()) { - // CurState curState = - // new CurState(_id, new ParticipantId(participantId), currentStateMap.get(participantId)); - // curStateMap.put(new ParticipantId(participantId), curState); - // } - // } - // 
_currentStateMap = ImmutableMap.copyOf(curStateMap); - _currentStateMap = null; - - Map msgMap = new HashMap(); - for (String msgId : instanceMsgMap.keySet()) { - Message message = instanceMsgMap.get(msgId); - msgMap.put(new MsgId(msgId), new Msg(message)); - } - _messageMap = ImmutableMap.copyOf(msgMap); - + _isEnabled = isEnabled; + _disabledPartitionIdSet = ImmutableSet.copyOf(disabledPartitionIdSet); + _tags = ImmutableSet.copyOf(tags); + _runningInstance = runningInstance; + _currentStateMap = ImmutableMap.copyOf(currentStateMap); + _messageMap = ImmutableMap.copyOf(messageMap); } /** @@ -176,7 +124,7 @@ public RunningInstance getRunningInstance() { * @return set of disabled partition id's, or empty set if none */ public Set getDisablePartitionIds() { - return _disabledPartitionIds; + return _disabledPartitionIdSet; } /** @@ -191,7 +139,7 @@ public Set getTags() { * Get message map * @return message map */ - public Map getMessageMap() { + public Map getMessageMap() { return _messageMap; } @@ -199,7 +147,129 @@ public Map getMessageMap() { * Get the current states of the resource * @return map of resource-id to current state, or empty map if none */ - public Map getCurrentStateMap() { + public Map getCurrentStateMap() { return _currentStateMap; } + + public ParticipantId getId() { + return _id; + } + + /** + * Assemble a participant + */ + public static class Builder { + private final ParticipantId _id; + private final Set _disabledPartitions; + private final Set _tags; + private final Map _currentStateMap; + private final Map _messageMap; + private String _hostName; + private int _port; + private boolean _isEnabled; + private RunningInstance _runningInstance; + + /** + * Build a participant with a given id + * @param id participant id + */ + public Builder(ParticipantId id) { + _id = id; + _disabledPartitions = new HashSet(); + _tags = new HashSet(); + _currentStateMap = new HashMap(); + _messageMap = new HashMap(); + _isEnabled = true; + } + + /** + * Set the 
participant host name + * @param hostName reachable host when live + * @return Builder + */ + public Builder hostName(String hostName) { + _hostName = hostName; + return this; + } + + /** + * Set the participant port + * @param port port number + * @return Builder + */ + public Builder port(int port) { + _port = port; + return this; + } + + /** + * Set whether or not the participant is enabled + * @param isEnabled true if enabled, false otherwise + * @return Builder + */ + public Builder enabled(boolean isEnabled) { + _isEnabled = isEnabled; + return this; + } + + /** + * Add a partition to disable for this participant + * @param partitionId the partition to disable + * @return Builder + */ + public Builder addDisabledPartition(PartitionId partitionId) { + _disabledPartitions.add(partitionId); + return this; + } + + /** + * Add an arbitrary tag for this participant + * @param tag the tag to add + * @return Builder + */ + public Builder addTag(String tag) { + _tags.add(tag); + return this; + } + + /** + * Add live properties to participants that are running + * @param runningInstance live participant properties + * @return Builder + */ + public Builder runningInstance(RunningInstance runningInstance) { + _runningInstance = runningInstance; + return this; + } + + /** + * Add a resource current state for this participant + * @param resourceId the resource the current state corresponds to + * @param currentState the current state + * @return Builder + */ + public Builder addCurrentState(ResourceId resourceId, CurrentState currentState) { + _currentStateMap.put(resourceId, currentState); + return this; + } + + /** + * Add a message for the participant + * @param message message to add + * @return Builder + */ + public Builder addMessage(Message message) { + _messageMap.put(new MessageId(message.getId()), message); + return this; + } + + /** + * Assemble the participant + * @return instantiated Participant + */ + public Participant build() { + return new Participant(_id, 
_hostName, _port, _isEnabled, _disabledPartitions, _tags, + _runningInstance, _currentStateMap, _messageMap); + } + } } diff --git a/helix-core/src/main/java/org/apache/helix/api/ParticipantAccessor.java b/helix-core/src/main/java/org/apache/helix/api/ParticipantAccessor.java index 03e0992768..9de2c5a43b 100644 --- a/helix-core/src/main/java/org/apache/helix/api/ParticipantAccessor.java +++ b/helix-core/src/main/java/org/apache/helix/api/ParticipantAccessor.java @@ -19,80 +19,309 @@ * under the License. */ +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; +import org.I0Itec.zkclient.DataUpdater; +import org.apache.helix.AccessOption; +import org.apache.helix.BaseDataAccessor; import org.apache.helix.HelixDataAccessor; +import org.apache.helix.HelixException; +import org.apache.helix.PropertyKey; +import org.apache.helix.ZNRecord; +import org.apache.helix.model.CurrentState; +import org.apache.helix.model.IdealState; +import org.apache.helix.model.IdealState.RebalanceMode; +import org.apache.helix.model.InstanceConfig; +import org.apache.helix.model.InstanceConfig.InstanceConfigProperty; +import org.apache.helix.model.LiveInstance; +import org.apache.helix.model.Message; +import org.apache.log4j.Logger; public class ParticipantAccessor { + private static final Logger LOG = Logger.getLogger(ParticipantAccessor.class); + private final HelixDataAccessor _accessor; + private final PropertyKey.Builder _keyBuilder; + private final ClusterId _clusterId; - public ParticipantAccessor(HelixDataAccessor accessor) { + public ParticipantAccessor(ClusterId clusterId, HelixDataAccessor accessor) { + _clusterId = clusterId; _accessor = accessor; + _keyBuilder = accessor.keyBuilder(); } /** - * + * @param participantId + * @param isEnabled */ - public void disable(ParticipantId participantId) { + void enableParticipant(ParticipantId participantId, 
boolean isEnabled) { + if (_accessor.getProperty(_keyBuilder.instanceConfig(participantId.stringify())) == null) { + throw new HelixException("Config for participant: " + participantId + + " does NOT exist in cluster: " + _clusterId); + } + + InstanceConfig config = new InstanceConfig(participantId.stringify()); + config.setInstanceEnabled(isEnabled); + _accessor.updateProperty(_keyBuilder.instanceConfig(participantId.stringify()), config); } /** - * + * disable participant */ - public void enable(ParticipantId participantId) { + public void disableParticipant(ParticipantId participantId) { + enableParticipant(participantId, false); + } + /** + * enable participant + */ + public void enableParticipant(ParticipantId participantId) { + enableParticipant(participantId, true); } /** + * create messages for participant * @param msgs */ - public void insertMsgs(ParticipantId participantId, SessionId sessionId, Map msgs) { + public void insertMessagesToParticipant(ParticipantId participantId, Map msgMap) { + List msgKeys = new ArrayList(); + List msgs = new ArrayList(); + for (MessageId msgId : msgMap.keySet()) { + msgKeys.add(_keyBuilder.message(participantId.stringify(), msgId.stringify())); + msgs.add(msgMap.get(msgId)); + } + _accessor.createChildren(msgKeys, msgs); } /** + * set messages of participant * @param msgs */ - public void updateMsgs(ParticipantId participantId, SessionId sessionId, Map msgs) { - + public void setMessagesOfParticipant(ParticipantId participantId, Map msgMap) { + List msgKeys = new ArrayList(); + List msgs = new ArrayList(); + for (MessageId msgId : msgMap.keySet()) { + msgKeys.add(_keyBuilder.message(participantId.stringify(), msgId.stringify())); + msgs.add(msgMap.get(msgId)); + } + _accessor.setChildren(msgKeys, msgs); } /** + * delete messages from participant * @param msgIdSet */ - public void deleteMsgs(ParticipantId participantId, SessionId sessionId, Set msgIdSet) { + public void deleteMessagesFromParticipant(ParticipantId
participantId, Set msgIdSet) { + List msgKeys = new ArrayList(); + for (MessageId msgId : msgIdSet) { + msgKeys.add(_keyBuilder.message(participantId.stringify(), msgId.stringify())); + } + // TODO impl batch remove + for (PropertyKey msgKey : msgKeys) { + _accessor.removeProperty(msgKey); + } } /** - * @param disablePartitionSet + * @param enabled + * @param participantId + * @param resourceId + * @param partitionIdSet */ - public void disablePartitions(ParticipantId participantId, Set disablePartitionSet) { + void enablePartitionsForParticipant(final boolean enabled, final ParticipantId participantId, + final ResourceId resourceId, final Set partitionIdSet) { + // check instanceConfig exists + PropertyKey instanceConfigKey = _keyBuilder.instanceConfig(participantId.stringify()); + if (_accessor.getProperty(instanceConfigKey) == null) { + throw new HelixException("Config for participant: " + participantId + + " does NOT exist in cluster: " + _clusterId); + } + + // check resource exist. warn if not + IdealState idealState = _accessor.getProperty(_keyBuilder.idealStates(resourceId.stringify())); + if (idealState == null) { + LOG.warn("Disable partitions: " + partitionIdSet + " but Cluster: " + _clusterId + + ", resource: " + resourceId + + " does NOT exists. probably disable it during ERROR->DROPPED transtition"); + } else { + // check partitions exist. 
warn if not + for (PartitionId partitionId : partitionIdSet) { + if ((idealState.getRebalanceMode() == RebalanceMode.SEMI_AUTO && idealState + .getPreferenceList(partitionId.stringify()) == null) + || (idealState.getRebalanceMode() == RebalanceMode.CUSTOMIZED && idealState + .getInstanceStateMap(partitionId.stringify()) == null)) { + LOG.warn("Cluster: " + _clusterId + ", resource: " + resourceId + ", partition: " + + partitionId + ", partition does not exist in ideal state"); + } + } + } + + // TODO merge list logic should go to znrecord updater + // update participantConfig + // could not use ZNRecordUpdater since it doesn't do listField merge/subtract + BaseDataAccessor baseAccessor = _accessor.getBaseDataAccessor(); + final List partitionNames = new ArrayList(); + for (PartitionId partitionId : partitionIdSet) { + partitionNames.add(partitionId.stringify()); + } + baseAccessor.update(instanceConfigKey.getPath(), new DataUpdater() { + @Override + public ZNRecord update(ZNRecord currentData) { + if (currentData == null) { + throw new HelixException("Cluster: " + _clusterId + ", instance: " + participantId + + ", participant config is null"); + } + + // TODO: merge with InstanceConfig.setInstanceEnabledForPartition + List list = + currentData.getListField(InstanceConfigProperty.HELIX_DISABLED_PARTITION.toString()); + Set disabledPartitions = new HashSet(); + if (list != null) { + disabledPartitions.addAll(list); + } + + if (enabled) { + disabledPartitions.removeAll(partitionNames); + } else { + disabledPartitions.addAll(partitionNames); + } + + list = new ArrayList(disabledPartitions); + Collections.sort(list); + currentData.setListField(InstanceConfigProperty.HELIX_DISABLED_PARTITION.toString(), list); + return currentData; + } + }, AccessOption.PERSISTENT); } /** - * @param enablePartitionSet + * @param disablePartitionSet */ - public void enablePartitions(ParticipantId participantId, Set enablePartitionSet) { + public void 
disablePartitionsForParticipant(ParticipantId participantId, ResourceId resourceId, + Set disablePartitionIdSet) { + enablePartitionsForParticipant(false, participantId, resourceId, disablePartitionIdSet); + } + /** + * @param enablePartitionSet + */ + public void enablePartitionsForParticipant(ParticipantId participantId, ResourceId resourceId, + Set enablePartitionIdSet) { + enablePartitionsForParticipant(true, participantId, resourceId, enablePartitionIdSet); } /** * create live instance for the participant * @param participantId */ - public void start(ParticipantId participantId) { - + public void startParticipant(ParticipantId participantId) { + // TODO impl this } /** + * read participant related data * @param participantId * @return */ - public Participant read(ParticipantId participantId) { - return null; + public Participant readParticipant(ParticipantId participantId) { + // read physical model + String participantName = participantId.stringify(); + InstanceConfig instanceConfig = _accessor.getProperty(_keyBuilder.instance(participantName)); + LiveInstance liveInstance = _accessor.getProperty(_keyBuilder.liveInstance(participantName)); + + Map instanceMsgMap = Collections.emptyMap(); + Map instanceCurStateMap = Collections.emptyMap(); + if (liveInstance != null) { + SessionId sessionId = liveInstance.getSessionId(); + + instanceMsgMap = _accessor.getChildValuesMap(_keyBuilder.messages(participantName)); + instanceCurStateMap = + _accessor.getChildValuesMap(_keyBuilder.currentStates(participantName, + sessionId.stringify())); + } + + // convert to logical model + String hostName = instanceConfig.getHostName(); + + int port = -1; + try { + port = Integer.parseInt(instanceConfig.getPort()); + } catch (IllegalArgumentException e) { + // keep as -1 + } + if (port < 0 || port > 65535) { + port = -1; + } + boolean isEnabled = instanceConfig.getInstanceEnabled(); + + List disabledPartitions = instanceConfig.getDisabledPartitions(); + Set disabledPartitionIdSet; + 
if (disabledPartitions == null) { + disabledPartitionIdSet = Collections.emptySet(); + } else { + disabledPartitionIdSet = new HashSet(); + for (String partitionId : disabledPartitions) { + disabledPartitionIdSet.add(new PartitionId(PartitionId.extractResourceId(partitionId), + PartitionId.stripResourceId(partitionId))); + } + } + + Set tags = new HashSet(instanceConfig.getTags()); + + RunningInstance runningInstance = null; + if (liveInstance != null) { + runningInstance = + new RunningInstance(new SessionId(liveInstance.getSessionIdString()), new HelixVersion( + liveInstance.getHelixVersionString()), new ProcId(liveInstance.getLiveInstance())); + } + + Map msgMap = new HashMap(); + for (String msgId : instanceMsgMap.keySet()) { + Message message = instanceMsgMap.get(msgId); + msgMap.put(new MessageId(msgId), message); + } + + // TODO convert current state + // Map curStateMap = new HashMap(); + // if (currentStateMap != null) { + // for (String participantId : currentStateMap.keySet()) { + // CurState curState = + // new CurState(_id, new ParticipantId(participantId), currentStateMap.get(participantId)); + // curStateMap.put(new ParticipantId(participantId), curState); + // } + // } + + return new Participant(participantId, hostName, port, isEnabled, disabledPartitionIdSet, tags, + runningInstance, null, msgMap); + } + + /** + * update resource current state of a participant + * @param curStateUpdate current state change delta + */ + public void updateParticipantCurrentState(ParticipantId participantId, SessionId sessionId, + ResourceId resourceId, CurrentState curStateUpdate) { + _accessor.updateProperty( + _keyBuilder.currentState(participantId.stringify(), sessionId.stringify(), + resourceId.stringify()), curStateUpdate); + } + + /** + * drop resource current state of a participant + */ + public void dropParticipantCurrentState(ParticipantId participantId, SessionId sessionId, + ResourceId resourceId) { + 
_accessor.removeProperty(_keyBuilder.currentState(participantId.stringify(), + sessionId.stringify(), resourceId.stringify())); } } diff --git a/helix-core/src/main/java/org/apache/helix/api/PartitionId.java b/helix-core/src/main/java/org/apache/helix/api/PartitionId.java index 3bec1ad165..04dbab1e46 100644 --- a/helix-core/src/main/java/org/apache/helix/api/PartitionId.java +++ b/helix-core/src/main/java/org/apache/helix/api/PartitionId.java @@ -45,7 +45,7 @@ public static String stripResourceId(String partitionName) { * @param partitionName * @return */ - public static ResourceId extracResourceId(String partitionName) { + public static ResourceId extractResourceId(String partitionName) { return new ResourceId(partitionName.substring(0, partitionName.lastIndexOf("_"))); } } diff --git a/helix-core/src/main/java/org/apache/helix/api/RebalancerConfig.java b/helix-core/src/main/java/org/apache/helix/api/RebalancerConfig.java index 219e86793d..cf4fbbca3f 100644 --- a/helix-core/src/main/java/org/apache/helix/api/RebalancerConfig.java +++ b/helix-core/src/main/java/org/apache/helix/api/RebalancerConfig.java @@ -28,12 +28,12 @@ public class RebalancerConfig { private final RscAssignment _resourceAssignment; - public RebalancerConfig() { - _rebalancerMode = RebalanceMode.NONE; - _rebalancerRef = null; - _stateModelDefId = null; - - _resourceAssignment = null; + public RebalancerConfig(RebalanceMode mode, RebalancerRef rebalancerRef, + StateModelDefId stateModelDefId, RscAssignment resourceAssignment) { + _rebalancerMode = mode; + _rebalancerRef = rebalancerRef; + _stateModelDefId = stateModelDefId; + _resourceAssignment = resourceAssignment; } /** @@ -59,4 +59,69 @@ public RebalancerRef getRebalancerRef() { public StateModelDefId getStateModelDefId() { return _stateModelDefId; } + + /** + * Get the ideal node and state assignment of the resource + * @return resource assignment + */ + public RscAssignment getResourceAssignment() { + return _resourceAssignment; + } + + /** 
+ * Assembles a RebalancerConfig + */ + public static class Builder { + private RebalanceMode _mode = RebalanceMode.NONE; + private RebalancerRef _rebalancerRef; + private StateModelDefId _stateModelDefId; + private RscAssignment _resourceAssignment; + + /** + * Set the rebalancer mode + * @param mode {@link RebalanceMode} + */ + public Builder rebalancerMode(RebalanceMode mode) { + _mode = mode; + return this; + } + + /** + * Set a user-defined rebalancer + * @param rebalancerRef a reference to the rebalancer + * @return Builder + */ + public Builder rebalancer(RebalancerRef rebalancerRef) { + _rebalancerRef = rebalancerRef; + return this; + } + + /** + * Set the state model definition + * @param stateModelDefId state model identifier + * @return Builder + */ + public Builder stateModelDef(StateModelDefId stateModelDefId) { + _stateModelDefId = stateModelDefId; + return this; + } + + /** + * Set the full assignment of partitions to nodes and corresponding states + * @param resourceAssignment resource assignment + * @return Builder + */ + public Builder resourceAssignment(RscAssignment resourceAssignment) { + _resourceAssignment = resourceAssignment; + return this; + } + + /** + * Assemble a RebalancerConfig + * @return a fully defined rebalancer configuration + */ + public RebalancerConfig build() { + return new RebalancerConfig(_mode, _rebalancerRef, _stateModelDefId, _resourceAssignment); + } + } } diff --git a/helix-core/src/main/java/org/apache/helix/api/RebalancerRef.java b/helix-core/src/main/java/org/apache/helix/api/RebalancerRef.java index 9011da979a..033e1e76d3 100644 --- a/helix-core/src/main/java/org/apache/helix/api/RebalancerRef.java +++ b/helix-core/src/main/java/org/apache/helix/api/RebalancerRef.java @@ -48,4 +48,25 @@ public Rebalancer getRebalancer() { return null; } + @Override + public String toString() { + return _rebalancerClassName; + } + + @Override + public boolean equals(Object that) { + if (that instanceof RebalancerRef) { + return 
this.toString().equals(((RebalancerRef) that).toString()); + } + return false; + } + + /** + * Get a rebalancer class reference + * @param rebalancerClassName name of the class + * @return RebalancerRef + */ + public static RebalancerRef from(String rebalancerClassName) { + return new RebalancerRef(rebalancerClassName); + } } diff --git a/helix-core/src/main/java/org/apache/helix/api/Resource.java b/helix-core/src/main/java/org/apache/helix/api/Resource.java index b76a0f8d2f..354d0a12be 100644 --- a/helix-core/src/main/java/org/apache/helix/api/Resource.java +++ b/helix-core/src/main/java/org/apache/helix/api/Resource.java @@ -19,18 +19,12 @@ * under the License. */ -import java.util.HashMap; import java.util.HashSet; -import java.util.Map; import java.util.Set; -import org.apache.helix.controller.rebalancer.Rebalancer; -import org.apache.helix.model.CurrentState; import org.apache.helix.model.IdealState; -import org.apache.helix.model.IdealState.RebalanceMode; import org.apache.helix.model.ResourceAssignment; -import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; /** @@ -58,7 +52,7 @@ public Resource(ResourceId id, IdealState idealState, ResourceAssignment rscAssi _rebalancerConfig = null; Set partitionSet = new HashSet(); - for (String partitionId : idealState.getPartitionSet()) { + for (String partitionId : idealState.getPartitionStringSet()) { partitionSet .add(new Partition(new PartitionId(id, PartitionId.stripResourceId(partitionId)))); } @@ -70,6 +64,21 @@ public Resource(ResourceId id, IdealState idealState, ResourceAssignment rscAssi _externalView = null; } + /** + * Construct a Resource + * @param id resource identifier + * @param partitionSet disjoint partitions of the resource + * @param externalView external view of the resource + * @param rebalancerConfig configuration properties for rebalancing this resource + */ + public Resource(ResourceId id, Set partitionSet, ExtView externalView, + RebalancerConfig 
rebalancerConfig) { + _id = id; + _partitionSet = ImmutableSet.copyOf(partitionSet); + _externalView = externalView; + _rebalancerConfig = rebalancerConfig; + } + /** * Get the set of partitions of the resource * @return set of partitions or empty set if none @@ -86,4 +95,68 @@ public ExtView getExternalView() { return _externalView; } + public RebalancerConfig getRebalancerConfig() { + return _rebalancerConfig; + } + + public ResourceId getId() { + return _id; + } + + /** + * Assembles a Resource + */ + public static class Builder { + private final ResourceId _id; + private final Set _partitionSet; + private ExtView _externalView; + private RebalancerConfig _rebalancerConfig; + + /** + * Build a Resource with an id + * @param id resource id + */ + public Builder(ResourceId id) { + _id = id; + _partitionSet = new HashSet(); + } + + /** + * Add a partition that the resource serves + * @param partition fully-qualified partition + * @return Builder + */ + public Builder addPartition(Partition partition) { + _partitionSet.add(partition); + return this; + } + + /** + * Set the external view of this resource + * @param extView currently served replica placement and state + * @return Builder + */ + public Builder externalView(ExtView extView) { + _externalView = extView; + return this; + } + + /** + * Set the rebalancer configuration + * @param rebalancerConfig properties of interest for rebalancing + * @return Builder + */ + public Builder rebalancerConfig(RebalancerConfig rebalancerConfig) { + _rebalancerConfig = rebalancerConfig; + return this; + } + + /** + * Create a Resource object + * @return instantiated Resource + */ + public Resource build() { + return new Resource(_id, _partitionSet, _externalView, _rebalancerConfig); + } + } } diff --git a/helix-core/src/main/java/org/apache/helix/api/ResourceAccessor.java b/helix-core/src/main/java/org/apache/helix/api/ResourceAccessor.java index b5a6516fea..c0757b41b1 100644 --- 
a/helix-core/src/main/java/org/apache/helix/api/ResourceAccessor.java +++ b/helix-core/src/main/java/org/apache/helix/api/ResourceAccessor.java @@ -20,26 +20,47 @@ */ import org.apache.helix.HelixDataAccessor; +import org.apache.helix.PropertyKey; +import org.apache.helix.model.ExternalView; +import org.apache.helix.model.IdealState; public class ResourceAccessor { - + private final ClusterId _clusterId; private final HelixDataAccessor _accessor; + private final PropertyKey.Builder _keyBuilder; - public ResourceAccessor(HelixDataAccessor accessor) { + public ResourceAccessor(ClusterId clusterId, HelixDataAccessor accessor) { + _clusterId = clusterId; _accessor = accessor; + _keyBuilder = accessor.keyBuilder(); } /** - * + * save resource assignment */ public void setRresourceAssignment(ResourceId resourceId, RscAssignment resourceAssignment) { + // TODO impl this + } + /** + * set ideal-state + */ + public void setResourceIdealState(ResourceId resourceId, IdealState idealState) { + _accessor.setProperty(_keyBuilder.idealStates(resourceId.stringify()), idealState); } /** - * + * set external view of a resource + * @param extView */ - public void setRebalancerConfig(RebalancerConfig config) { + public void setResourceExternalView(ResourceId resourceId, ExternalView extView) { + _accessor.setProperty(_keyBuilder.externalView(resourceId.stringify()), extView); + } + /** + * drop external view of a resource + */ + public void dropResourceExternalView(ResourceId resourceId) { + _accessor.removeProperty(_keyBuilder.externalView(resourceId.stringify())); } } diff --git a/helix-core/src/main/java/org/apache/helix/api/RscAssignment.java b/helix-core/src/main/java/org/apache/helix/api/RscAssignment.java index 88e4ff6d94..90b77e30d0 100644 --- a/helix-core/src/main/java/org/apache/helix/api/RscAssignment.java +++ b/helix-core/src/main/java/org/apache/helix/api/RscAssignment.java @@ -21,6 +21,7 @@ import java.util.HashMap; import java.util.Map; +import java.util.Set; import
org.apache.helix.model.ResourceAssignment; @@ -29,16 +30,111 @@ public class RscAssignment { private final Map> _resourceAssignment; + /** + * Construct an assignment from a physically-stored assignment + * @param rscAssignment the assignment + */ public RscAssignment(ResourceAssignment rscAssignment) { Map> resourceAssignment = new HashMap>(); - // TODO fill the map + for (org.apache.helix.model.Partition partition : rscAssignment.getMappedPartitions()) { + Map replicaMap = new HashMap(); + Map rawReplicaMap = rscAssignment.getReplicaMap(partition); + for (String participantId : rawReplicaMap.keySet()) { + replicaMap.put(new ParticipantId(participantId), + new State(rawReplicaMap.get(participantId))); + } + resourceAssignment.put(new PartitionId(new ResourceId(rscAssignment.getResourceName()), + partition.getPartitionName()), replicaMap); + } _resourceAssignment = ImmutableMap.copyOf(resourceAssignment); } + /** + * Build an assignment from a map of assigned replicas + * @param resourceAssignment map of (partition, participant, state) + */ + public RscAssignment(Map> resourceAssignment) { + ImmutableMap.Builder> mapBuilder = + new ImmutableMap.Builder>(); + for (PartitionId partitionId : resourceAssignment.keySet()) { + mapBuilder.put(partitionId, ImmutableMap.copyOf(resourceAssignment.get(partitionId))); + } + _resourceAssignment = mapBuilder.build(); + } + + /** + * Get the partitions currently with assignments + * @return set of partition ids + */ + public Set getAssignedPartitions() { + return _resourceAssignment.keySet(); + } + + /** + * Get the replica assignment map for a partition + * @param partitionId the partition to look up + * @return map of (participant id, state) + */ public Map getParticipantStateMap(PartitionId partitionId) { return _resourceAssignment.get(partitionId); } + + /** + * Assemble a full assignment + */ + public static class Builder { + private final Map> _resourceAssignment; + + /** + * Instantiate the builder + */ + public Builder() 
{ + _resourceAssignment = new HashMap>(); + } + + /** + * Add assignments for a partition + * @param partitionId partition to assign + * @param replicaMap map of participant and state for each replica + * @return Builder + */ + public Builder addAssignments(PartitionId partitionId, Map replicaMap) { + if (!_resourceAssignment.containsKey(partitionId)) { + _resourceAssignment.put(partitionId, replicaMap); + } else { + _resourceAssignment.get(partitionId).putAll(replicaMap); + } + return this; + } + + /** + * Assign a single replica + * @param partitionId partition to assign + * @param participantId participant to host the replica + * @param state replica state + * @return Builder + */ + public Builder addAssignment(PartitionId partitionId, ParticipantId participantId, State state) { + Map replicaMap; + if (!_resourceAssignment.containsKey(partitionId)) { + replicaMap = new HashMap(); + _resourceAssignment.put(partitionId, replicaMap); + } else { + replicaMap = _resourceAssignment.get(partitionId); + } + replicaMap.put(participantId, state); + return this; + } + + /** + * Build the resource assignment + * @return instantiated RscAssignment + */ + public RscAssignment build() { + return new RscAssignment(_resourceAssignment); + } + } } diff --git a/helix-core/src/main/java/org/apache/helix/api/RunningInstance.java b/helix-core/src/main/java/org/apache/helix/api/RunningInstance.java index 49c5ccfca1..4effd248b4 100644 --- a/helix-core/src/main/java/org/apache/helix/api/RunningInstance.java +++ b/helix-core/src/main/java/org/apache/helix/api/RunningInstance.java @@ -63,5 +63,4 @@ public HelixVersion getVersion() { public ProcId getPid() { return _pid; } - } diff --git a/helix-core/src/main/java/org/apache/helix/api/State.java b/helix-core/src/main/java/org/apache/helix/api/State.java index b2000f2b0a..0fc8bc7735 100644 --- a/helix-core/src/main/java/org/apache/helix/api/State.java +++ b/helix-core/src/main/java/org/apache/helix/api/State.java @@ -33,4 +33,24 @@ public 
State(String state) { public String toString() { return _state; } + + @Override + public boolean equals(Object that) { + if (that instanceof State) { + return this.toString().equals(((State) that).toString()); + } + return false; + } + + /** + * Get a State from a state name + * @param state state name + * @return State + */ + public static State from(String state) { + if (state == null) { + return null; + } + return new State(state); + } } diff --git a/helix-core/src/main/java/org/apache/helix/controller/GenericHelixController.java b/helix-core/src/main/java/org/apache/helix/controller/GenericHelixController.java index 8e4e1ea1b3..5ba4362c64 100644 --- a/helix-core/src/main/java/org/apache/helix/controller/GenericHelixController.java +++ b/helix-core/src/main/java/org/apache/helix/controller/GenericHelixController.java @@ -480,7 +480,7 @@ protected void checkLiveInstancesObservation(List liveInstances, Map curSessions = new HashMap(); for (LiveInstance liveInstance : liveInstances) { curInstances.put(liveInstance.getInstanceName(), liveInstance); - curSessions.put(liveInstance.getSessionId(), liveInstance); + curSessions.put(liveInstance.getSessionIdString(), liveInstance); } Map lastInstances = _lastSeenInstances.get(); diff --git a/helix-core/src/main/java/org/apache/helix/controller/rebalancer/AutoRebalancer.java b/helix-core/src/main/java/org/apache/helix/controller/rebalancer/AutoRebalancer.java index 9564e35fb7..6d64bb8b83 100644 --- a/helix-core/src/main/java/org/apache/helix/controller/rebalancer/AutoRebalancer.java +++ b/helix-core/src/main/java/org/apache/helix/controller/rebalancer/AutoRebalancer.java @@ -71,7 +71,7 @@ public void init(HelixManager manager) { public ResourceAssignment computeResourceMapping(Resource resource, IdealState currentIdealState, CurrentStateOutput currentStateOutput, ClusterDataCache clusterData) { // Compute a preference list based on the current ideal state - List partitions = new 
ArrayList(currentIdealState.getPartitionSet()); + List partitions = new ArrayList(currentIdealState.getPartitionStringSet()); String stateModelName = currentIdealState.getStateModelDefRef(); StateModelDefinition stateModelDef = clusterData.getStateModelDef(stateModelName); Map liveInstance = clusterData.getLiveInstances(); diff --git a/helix-core/src/main/java/org/apache/helix/controller/rebalancer/CustomRebalancer.java b/helix-core/src/main/java/org/apache/helix/controller/rebalancer/CustomRebalancer.java index 8557fa0d8c..5de420c715 100644 --- a/helix-core/src/main/java/org/apache/helix/controller/rebalancer/CustomRebalancer.java +++ b/helix-core/src/main/java/org/apache/helix/controller/rebalancer/CustomRebalancer.java @@ -102,7 +102,7 @@ private Map computeCustomizedBestStateForPartition(ClusterDataCa HelixDefinedState.ERROR.toString())) && disabledInstancesForPartition.contains(instance)) { // if disabled and not in ERROR state, transit to initial-state (e.g. OFFLINE) - instanceStateMap.put(instance, stateModelDef.getInitialState()); + instanceStateMap.put(instance, stateModelDef.getInitialStateString()); } } } diff --git a/helix-core/src/main/java/org/apache/helix/controller/rebalancer/util/ConstraintBasedAssignment.java b/helix-core/src/main/java/org/apache/helix/controller/rebalancer/util/ConstraintBasedAssignment.java index d2dbdef995..ee7524f8f1 100644 --- a/helix-core/src/main/java/org/apache/helix/controller/rebalancer/util/ConstraintBasedAssignment.java +++ b/helix-core/src/main/java/org/apache/helix/controller/rebalancer/util/ConstraintBasedAssignment.java @@ -28,8 +28,8 @@ import java.util.Map; import java.util.Set; -import org.apache.helix.HelixDefinedState; import org.apache.helix.HelixConstants.StateModelToken; +import org.apache.helix.HelixDefinedState; import org.apache.helix.controller.stages.ClusterDataCache; import org.apache.helix.model.IdealState; import org.apache.helix.model.LiveInstance; @@ -86,7 +86,7 @@ public static Map 
computeAutoBestStateForPartition(ClusterDataCa HelixDefinedState.ERROR.toString())) && disabledInstancesForPartition.contains(instance)) { // if disabled and not in ERROR state, transit to initial-state (e.g. OFFLINE) - instanceStateMap.put(instance, stateModelDef.getInitialState()); + instanceStateMap.put(instance, stateModelDef.getInitialStateString()); } } } @@ -96,7 +96,7 @@ public static Map computeAutoBestStateForPartition(ClusterDataCa return instanceStateMap; } - List statesPriorityList = stateModelDef.getStatesPriorityList(); + List statesPriorityList = stateModelDef.getStatesPriorityStringList(); boolean assigned[] = new boolean[instancePreferenceList.size()]; Map liveInstancesMap = cache.getLiveInstances(); @@ -151,7 +151,7 @@ public static Map computeAutoBestStateForPartition(ClusterDataCa public static LinkedHashMap stateCount(StateModelDefinition stateModelDef, int liveNodesNb, int totalReplicas) { LinkedHashMap stateCountMap = new LinkedHashMap(); - List statesPriorityList = stateModelDef.getStatesPriorityList(); + List statesPriorityList = stateModelDef.getStatesPriorityStringList(); int replicas = totalReplicas; for (String state : statesPriorityList) { diff --git a/helix-core/src/main/java/org/apache/helix/controller/stages/ClusterDataCache.java b/helix-core/src/main/java/org/apache/helix/controller/stages/ClusterDataCache.java index b90880eb73..062f4684c1 100644 --- a/helix-core/src/main/java/org/apache/helix/controller/stages/ClusterDataCache.java +++ b/helix-core/src/main/java/org/apache/helix/controller/stages/ClusterDataCache.java @@ -72,7 +72,7 @@ public boolean refresh(HelixDataAccessor accessor) { _liveInstanceMap = accessor.getChildValuesMap(keyBuilder.liveInstances()); for (LiveInstance instance : _liveInstanceMap.values()) { - LOG.trace("live instance: " + instance.getInstanceName() + " " + instance.getSessionId()); + LOG.trace("live instance: " + instance.getInstanceName() + " " + instance.getSessionIdString()); } _stateModelDefMap = 
accessor.getChildValuesMap(keyBuilder.stateModelDefs()); @@ -90,7 +90,7 @@ public boolean refresh(HelixDataAccessor accessor) { new HashMap>>(); for (String instanceName : _liveInstanceMap.keySet()) { LiveInstance liveInstance = _liveInstanceMap.get(instanceName); - String sessionId = liveInstance.getSessionId(); + String sessionId = liveInstance.getSessionIdString(); if (!allCurStateMap.containsKey(instanceName)) { allCurStateMap.put(instanceName, new HashMap>()); } diff --git a/helix-core/src/main/java/org/apache/helix/controller/stages/CompatibilityCheckStage.java b/helix-core/src/main/java/org/apache/helix/controller/stages/CompatibilityCheckStage.java index d8f98ed7b3..a1b7b0d8ad 100644 --- a/helix-core/src/main/java/org/apache/helix/controller/stages/CompatibilityCheckStage.java +++ b/helix-core/src/main/java/org/apache/helix/controller/stages/CompatibilityCheckStage.java @@ -46,7 +46,7 @@ public void process(ClusterEvent event) throws Exception { HelixManagerProperties properties = manager.getProperties(); Map liveInstanceMap = cache.getLiveInstances(); for (LiveInstance liveInstance : liveInstanceMap.values()) { - String participantVersion = liveInstance.getHelixVersion(); + String participantVersion = liveInstance.getHelixVersionString(); if (!properties.isParticipantCompatible(participantVersion)) { String errorMsg = "incompatible participant. pipeline will not continue. 
" + "controller: " diff --git a/helix-core/src/main/java/org/apache/helix/controller/stages/CurrentStateComputationStage.java b/helix-core/src/main/java/org/apache/helix/controller/stages/CurrentStateComputationStage.java index 6097432517..1e4e72b268 100644 --- a/helix-core/src/main/java/org/apache/helix/controller/stages/CurrentStateComputationStage.java +++ b/helix-core/src/main/java/org/apache/helix/controller/stages/CurrentStateComputationStage.java @@ -57,7 +57,7 @@ public void process(ClusterEvent event) throws Exception { if (!MessageType.STATE_TRANSITION.toString().equalsIgnoreCase(message.getMsgType())) { continue; } - if (!instance.getSessionId().equals(message.getTgtSessionId())) { + if (!instance.getSessionIdString().equals(message.getTgtSessionIdString())) { continue; } String resourceName = message.getResourceName(); @@ -71,7 +71,7 @@ public void process(ClusterEvent event) throws Exception { Partition partition = resource.getPartition(partitionName); if (partition != null) { currentStateOutput.setPendingState(resourceName, partition, instanceName, - message.getToState()); + message.getToStateString()); } else { // log } @@ -82,7 +82,7 @@ public void process(ClusterEvent event) throws Exception { Partition partition = resource.getPartition(partitionName); if (partition != null) { currentStateOutput.setPendingState(resourceName, partition, instanceName, - message.getToState()); + message.getToStateString()); } else { // log } @@ -94,12 +94,12 @@ public void process(ClusterEvent event) throws Exception { for (LiveInstance instance : liveInstances.values()) { String instanceName = instance.getInstanceName(); - String clientSessionId = instance.getSessionId(); + String clientSessionId = instance.getSessionIdString(); Map currentStateMap = cache.getCurrentState(instanceName, clientSessionId); for (CurrentState currentState : currentStateMap.values()) { - if (!instance.getSessionId().equals(currentState.getSessionId())) { + if 
(!instance.getSessionIdString().equals(currentState.getSessionIdString())) { continue; } String resourceName = currentState.getResourceName(); @@ -114,7 +114,7 @@ public void process(ClusterEvent event) throws Exception { currentStateOutput.setBucketSize(resourceName, currentState.getBucketSize()); - Map partitionStateMap = currentState.getPartitionStateMap(); + Map partitionStateMap = currentState.getPartitionStateStringMap(); for (String partitionName : partitionStateMap.keySet()) { Partition partition = resource.getPartition(partitionName); if (partition != null) { diff --git a/helix-core/src/main/java/org/apache/helix/controller/stages/ExternalViewComputeStage.java b/helix-core/src/main/java/org/apache/helix/controller/stages/ExternalViewComputeStage.java index 35ef177a8b..d50bd9ec41 100644 --- a/helix-core/src/main/java/org/apache/helix/controller/stages/ExternalViewComputeStage.java +++ b/helix-core/src/main/java/org/apache/helix/controller/stages/ExternalViewComputeStage.java @@ -168,7 +168,7 @@ private void updateScheduledTaskStatus(ExternalView ev, HelixManager manager, Builder keyBuilder = accessor.keyBuilder(); - for (String taskPartitionName : ev.getPartitionSet()) { + for (String taskPartitionName : ev.getPartitionStringSet()) { for (String taskState : ev.getStateMap(taskPartitionName).values()) { if (taskState.equalsIgnoreCase(HelixDefinedState.ERROR.toString()) || taskState.equalsIgnoreCase("COMPLETED")) { @@ -193,7 +193,7 @@ private void updateScheduledTaskStatus(ExternalView ev, HelixManager manager, } } // fill the controllerMsgIdCountMap - for (String taskId : taskQueueIdealState.getPartitionSet()) { + for (String taskId : taskQueueIdealState.getPartitionStringSet()) { String controllerMsgId = taskQueueIdealState.getRecord().getMapField(taskId) .get(DefaultSchedulerMessageHandlerFactory.CONTROLLER_MSG_ID); diff --git a/helix-core/src/main/java/org/apache/helix/controller/stages/MessageGenerationPhase.java 
b/helix-core/src/main/java/org/apache/helix/controller/stages/MessageGenerationPhase.java index 92964e93f7..d0237d73b9 100644 --- a/helix-core/src/main/java/org/apache/helix/controller/stages/MessageGenerationPhase.java +++ b/helix-core/src/main/java/org/apache/helix/controller/stages/MessageGenerationPhase.java @@ -64,7 +64,7 @@ public void process(ClusterEvent event) throws Exception { Map sessionIdMap = new HashMap(); for (LiveInstance liveInstance : liveInstances.values()) { - sessionIdMap.put(liveInstance.getInstanceName(), liveInstance.getSessionId()); + sessionIdMap.put(liveInstance.getInstanceName(), liveInstance.getSessionIdString()); } MessageGenerationOutput output = new MessageGenerationOutput(); @@ -89,7 +89,7 @@ public void process(ClusterEvent event) throws Exception { String currentState = currentStateOutput.getCurrentState(resourceName, partition, instanceName); if (currentState == null) { - currentState = stateModelDef.getInitialState(); + currentState = stateModelDef.getInitialStateString(); } if (desiredState.equalsIgnoreCase(currentState)) { @@ -170,7 +170,7 @@ public void process(ClusterEvent event) throws Exception { } // add generated messages to output according to state priority - List statesPriorityList = stateModelDef.getStatesPriorityList(); + List statesPriorityList = stateModelDef.getStatesPriorityStringList(); for (String state : statesPriorityList) { if (messageMap.containsKey(state)) { for (Message message : messageMap.get(state)) { diff --git a/helix-core/src/main/java/org/apache/helix/controller/stages/MessageSelectionStage.java b/helix-core/src/main/java/org/apache/helix/controller/stages/MessageSelectionStage.java index 9a420aa5f5..8478db3b4e 100644 --- a/helix-core/src/main/java/org/apache/helix/controller/stages/MessageSelectionStage.java +++ b/helix-core/src/main/java/org/apache/helix/controller/stages/MessageSelectionStage.java @@ -104,7 +104,7 @@ public void process(ClusterEvent event) throws Exception { 
selectMessages(cache.getLiveInstances(), currentStateOutput.getCurrentStateMap(resourceName, partition), currentStateOutput.getPendingStateMap(resourceName, partition), messages, - stateConstraints, stateTransitionPriorities, stateModelDef.getInitialState()); + stateConstraints, stateTransitionPriorities, stateModelDef.getInitialStateString()); output.addMessages(resourceName, partition, selectedMessages); } } @@ -169,8 +169,8 @@ List selectMessages(Map liveInstances, Map> messagesGroupByStateTransitPriority = new TreeMap>(); for (Message message : messages) { - String fromState = message.getFromState(); - String toState = message.getToState(); + String fromState = message.getFromStateString(); + String toState = message.getToStateString(); String transition = fromState + "-" + toState; int priority = Integer.MAX_VALUE; @@ -187,8 +187,8 @@ List selectMessages(Map liveInstances, // select messages for (List messageList : messagesGroupByStateTransitPriority.values()) { for (Message message : messageList) { - String fromState = message.getFromState(); - String toState = message.getToState(); + String fromState = message.getFromStateString(); + String toState = message.getToStateString(); if (!bounds.containsKey(fromState)) { LOG.error("Message's fromState is not in currentState. 
message: " + message); @@ -239,7 +239,7 @@ private Map computeStateConstraints(StateModelDefinition stateMo IdealState idealState, ClusterDataCache cache) { Map stateConstraints = new HashMap(); - List statePriorityList = stateModelDefinition.getStatesPriorityList(); + List statePriorityList = stateModelDefinition.getStatesPriorityStringList(); for (String state : statePriorityList) { String numInstancesPerState = stateModelDefinition.getNumInstancesPerState(state); int max = -1; @@ -272,7 +272,7 @@ private Map computeStateConstraints(StateModelDefinition stateMo // so that behavior is consistent private Map getStateTransitionPriorityMap(StateModelDefinition stateModelDef) { Map stateTransitionPriorities = new HashMap(); - List stateTransitionPriorityList = stateModelDef.getStateTransitionPriorityList(); + List stateTransitionPriorityList = stateModelDef.getStateTransitionPriorityStringList(); for (int i = 0; i < stateTransitionPriorityList.size(); i++) { stateTransitionPriorities.put(stateTransitionPriorityList.get(i), i); } diff --git a/helix-core/src/main/java/org/apache/helix/controller/stages/RebalanceIdealStateStage.java b/helix-core/src/main/java/org/apache/helix/controller/stages/RebalanceIdealStateStage.java index d82ee2f2af..f16bb39c04 100644 --- a/helix-core/src/main/java/org/apache/helix/controller/stages/RebalanceIdealStateStage.java +++ b/helix-core/src/main/java/org/apache/helix/controller/stages/RebalanceIdealStateStage.java @@ -60,7 +60,7 @@ public void process(ClusterEvent event) throws Exception { (Rebalancer) (HelixUtil.loadClass(getClass(), rebalancerClassName).newInstance()); balancer.init(manager); Resource resource = new Resource(resourceName); - for (String partitionName : currentIdealState.getPartitionSet()) { + for (String partitionName : currentIdealState.getPartitionStringSet()) { resource.addPartition(partitionName); } ResourceAssignment resourceAssignment = diff --git 
a/helix-core/src/main/java/org/apache/helix/controller/stages/ResourceComputationStage.java b/helix-core/src/main/java/org/apache/helix/controller/stages/ResourceComputationStage.java index 51f0ec1cdf..b67a426c22 100644 --- a/helix-core/src/main/java/org/apache/helix/controller/stages/ResourceComputationStage.java +++ b/helix-core/src/main/java/org/apache/helix/controller/stages/ResourceComputationStage.java @@ -53,7 +53,7 @@ public void process(ClusterEvent event) throws Exception { if (idealStates != null && idealStates.size() > 0) { for (IdealState idealState : idealStates.values()) { - Set partitionSet = idealState.getPartitionSet(); + Set partitionSet = idealState.getPartitionStringSet(); String resourceName = idealState.getResourceName(); for (String partition : partitionSet) { @@ -74,7 +74,7 @@ public void process(ClusterEvent event) throws Exception { if (availableInstances != null && availableInstances.size() > 0) { for (LiveInstance instance : availableInstances.values()) { String instanceName = instance.getInstanceName(); - String clientSessionId = instance.getSessionId(); + String clientSessionId = instance.getSessionIdString(); Map currentStateMap = cache.getCurrentState(instanceName, clientSessionId); @@ -84,7 +84,7 @@ public void process(ClusterEvent event) throws Exception { for (CurrentState currentState : currentStateMap.values()) { String resourceName = currentState.getResourceName(); - Map resourceStateMap = currentState.getPartitionStateMap(); + Map resourceStateMap = currentState.getPartitionStateStringMap(); // don't overwrite ideal state settings if (!resourceMap.containsKey(resourceName)) { @@ -98,8 +98,8 @@ public void process(ClusterEvent event) throws Exception { if (currentState.getStateModelDefRef() == null) { LOG.error("state model def is null." 
+ "resource:" + currentState.getResourceName() - + ", partitions: " + currentState.getPartitionStateMap().keySet() + ", states: " - + currentState.getPartitionStateMap().values()); + + ", partitions: " + currentState.getPartitionStateStringMap().keySet() + ", states: " + + currentState.getPartitionStateStringMap().values()); throw new StageException("State model def is null for resource:" + currentState.getResourceName()); } diff --git a/helix-core/src/main/java/org/apache/helix/controller/stages/TaskAssignmentStage.java b/helix-core/src/main/java/org/apache/helix/controller/stages/TaskAssignmentStage.java index 192a645d02..85ae163707 100644 --- a/helix-core/src/main/java/org/apache/helix/controller/stages/TaskAssignmentStage.java +++ b/helix-core/src/main/java/org/apache/helix/controller/stages/TaskAssignmentStage.java @@ -96,7 +96,7 @@ List batchMessage(Builder keyBuilder, List messages, LiveInstance liveInstance = liveInstanceMap.get(instanceName); String participantVersion = null; if (liveInstance != null) { - participantVersion = liveInstance.getHelixVersion(); + participantVersion = liveInstance.getHelixVersionString(); } if (resource == null || !resource.getBatchMessageMode() || participantVersion == null @@ -106,9 +106,9 @@ List batchMessage(Builder keyBuilder, List messages, } String key = - keyBuilder.currentState(message.getTgtName(), message.getTgtSessionId(), + keyBuilder.currentState(message.getTgtName(), message.getTgtSessionIdString(), message.getResourceName()).getPath() - + "/" + message.getFromState() + "/" + message.getToState(); + + "/" + message.getFromStateString() + "/" + message.getToStateString(); if (!batchMessages.containsKey(key)) { Message batchMessage = new Message(message.getRecord()); @@ -131,9 +131,9 @@ protected void sendMessages(HelixDataAccessor dataAccessor, List messag List keys = new ArrayList(); for (Message message : messages) { - logger.info("Sending Message " + message.getMsgId() + " to " + message.getTgtName() + 
logger.info("Sending Message " + message.getMsgIdString() + " to " + message.getTgtName() + " transit " + message.getPartitionName() + "|" + message.getPartitionNames() + " from:" - + message.getFromState() + " to:" + message.getToState()); + + message.getFromStateString() + " to:" + message.getToStateString()); // System.out.println("[dbg] Sending Message " + message.getMsgId() + " to " + // message.getTgtName() diff --git a/helix-core/src/main/java/org/apache/helix/manager/zk/ControllerManager.java b/helix-core/src/main/java/org/apache/helix/manager/zk/ControllerManager.java index 1ed6dea35b..0fb67099b9 100644 --- a/helix-core/src/main/java/org/apache/helix/manager/zk/ControllerManager.java +++ b/helix-core/src/main/java/org/apache/helix/manager/zk/ControllerManager.java @@ -150,7 +150,7 @@ public boolean isLeader() { LiveInstance leader = _dataAccessor.getProperty(_keyBuilder.controllerLeader()); if (leader != null) { String leaderName = leader.getInstanceName(); - String sessionId = leader.getSessionId(); + String sessionId = leader.getSessionIdString(); if (leaderName != null && leaderName.equals(_instanceName) && sessionId != null && sessionId.equals(_sessionId)) { return true; diff --git a/helix-core/src/main/java/org/apache/helix/manager/zk/CurStateCarryOverUpdater.java b/helix-core/src/main/java/org/apache/helix/manager/zk/CurStateCarryOverUpdater.java index b96de1855f..3fd3fb8da3 100644 --- a/helix-core/src/main/java/org/apache/helix/manager/zk/CurStateCarryOverUpdater.java +++ b/helix-core/src/main/java/org/apache/helix/manager/zk/CurStateCarryOverUpdater.java @@ -57,7 +57,7 @@ public ZNRecord update(ZNRecord currentData) { curState = new CurrentState(currentData); } - for (String partitionName : _lastCurState.getPartitionStateMap().keySet()) { + for (String partitionName : _lastCurState.getPartitionStateStringMap().keySet()) { // carry-over only when current-state not exist if (curState.getState(partitionName) == null) { curState.setState(partitionName, 
_initState); diff --git a/helix-core/src/main/java/org/apache/helix/manager/zk/DefaultControllerMessageHandlerFactory.java b/helix-core/src/main/java/org/apache/helix/manager/zk/DefaultControllerMessageHandlerFactory.java index 5f6d0835c2..523adbaa77 100644 --- a/helix-core/src/main/java/org/apache/helix/manager/zk/DefaultControllerMessageHandlerFactory.java +++ b/helix-core/src/main/java/org/apache/helix/manager/zk/DefaultControllerMessageHandlerFactory.java @@ -36,7 +36,7 @@ public MessageHandler createHandler(Message message, NotificationContext context String type = message.getMsgType(); if (!type.equals(getMessageType())) { - throw new HelixException("Unexpected msg type for message " + message.getMsgId() + " type:" + throw new HelixException("Unexpected msg type for message " + message.getMsgIdString() + " type:" + message.getMsgType()); } @@ -63,18 +63,18 @@ public HelixTaskResult handleMessage() throws InterruptedException { String type = _message.getMsgType(); HelixTaskResult result = new HelixTaskResult(); if (!type.equals(MessageType.CONTROLLER_MSG.toString())) { - throw new HelixException("Unexpected msg type for message " + _message.getMsgId() + throw new HelixException("Unexpected msg type for message " + _message.getMsgIdString() + " type:" + _message.getMsgType()); } result.getTaskResultMap().put("ControllerResult", - "msg " + _message.getMsgId() + " from " + _message.getMsgSrc() + " processed"); + "msg " + _message.getMsgIdString() + " from " + _message.getMsgSrc() + " processed"); result.setSuccess(true); return result; } @Override public void onError(Exception e, ErrorCode code, ErrorType type) { - _logger.error("Message handling pipeline get an exception. MsgId:" + _message.getMsgId(), e); + _logger.error("Message handling pipeline get an exception. 
MsgId:" + _message.getMsgIdString(), e); } } } diff --git a/helix-core/src/main/java/org/apache/helix/manager/zk/DefaultParticipantErrorMessageHandlerFactory.java b/helix-core/src/main/java/org/apache/helix/manager/zk/DefaultParticipantErrorMessageHandlerFactory.java index d2e56eb948..a147ddb6ff 100644 --- a/helix-core/src/main/java/org/apache/helix/manager/zk/DefaultParticipantErrorMessageHandlerFactory.java +++ b/helix-core/src/main/java/org/apache/helix/manager/zk/DefaultParticipantErrorMessageHandlerFactory.java @@ -99,7 +99,7 @@ public HelixTaskResult handleMessage() throws InterruptedException { @Override public void onError(Exception e, ErrorCode code, ErrorType type) { - _logger.error("Message handling pipeline get an exception. MsgId:" + _message.getMsgId(), e); + _logger.error("Message handling pipeline get an exception. MsgId:" + _message.getMsgIdString(), e); } } @@ -109,7 +109,7 @@ public MessageHandler createHandler(Message message, NotificationContext context String type = message.getMsgType(); if (!type.equals(getMessageType())) { - throw new HelixException("Unexpected msg type for message " + message.getMsgId() + " type:" + throw new HelixException("Unexpected msg type for message " + message.getMsgIdString() + " type:" + message.getMsgType()); } diff --git a/helix-core/src/main/java/org/apache/helix/manager/zk/DefaultSchedulerMessageHandlerFactory.java b/helix-core/src/main/java/org/apache/helix/manager/zk/DefaultSchedulerMessageHandlerFactory.java index 5451a8128c..a7fcc10367 100644 --- a/helix-core/src/main/java/org/apache/helix/manager/zk/DefaultSchedulerMessageHandlerFactory.java +++ b/helix-core/src/main/java/org/apache/helix/manager/zk/DefaultSchedulerMessageHandlerFactory.java @@ -76,7 +76,7 @@ public SchedulerAsyncCallback(Message originalMessage, HelixManager manager) { @Override public void onTimeOut() { - _logger.info("Scheduler msg timeout " + _originalMessage.getMsgId() + " timout with " + _logger.info("Scheduler msg timeout " + 
_originalMessage.getMsgIdString() + " timout with " + _timeout + " Ms"); _statusUpdateUtil.logError(_originalMessage, SchedulerAsyncCallback.class, "Task timeout", @@ -86,13 +86,13 @@ public void onTimeOut() { @Override public void onReplyMessage(Message message) { - _logger.info("Update for scheduler msg " + _originalMessage.getMsgId() + " Message " + _logger.info("Update for scheduler msg " + _originalMessage.getMsgIdString() + " Message " + message.getMsgSrc() + " id " + message.getCorrelationId() + " completed"); String key = "MessageResult " + message.getMsgSrc() + " " + UUID.randomUUID(); _resultSummaryMap.put(key, message.getResultMap()); if (this.isDone()) { - _logger.info("Scheduler msg " + _originalMessage.getMsgId() + " completed"); + _logger.info("Scheduler msg " + _originalMessage.getMsgIdString() + " completed"); _statusUpdateUtil.logInfo(_originalMessage, SchedulerAsyncCallback.class, "Scheduler task completed", _manager.getHelixDataAccessor()); addSummary(_resultSummaryMap, _originalMessage, _manager, false); @@ -111,12 +111,12 @@ private void addSummary(Map> _resultSummaryMap, ZNRecord statusUpdate = accessor.getProperty( keyBuilder.controllerTaskStatus(MessageType.SCHEDULER_MSG.toString(), - originalMessage.getMsgId())).getRecord(); + originalMessage.getMsgIdString())).getRecord(); statusUpdate.getMapFields().putAll(_resultSummaryMap); accessor.setProperty( keyBuilder.controllerTaskStatus(MessageType.SCHEDULER_MSG.toString(), - originalMessage.getMsgId()), new StatusUpdate(statusUpdate)); + originalMessage.getMsgIdString()), new StatusUpdate(statusUpdate)); } } @@ -133,7 +133,7 @@ public MessageHandler createHandler(Message message, NotificationContext context String type = message.getMsgType(); if (!type.equals(getMessageType())) { - throw new HelixException("Unexpected msg type for message " + message.getMsgId() + " type:" + throw new HelixException("Unexpected msg type for message " + message.getMsgIdString() + " type:" + message.getMsgType()); 
} @@ -206,7 +206,7 @@ void handleMessageUsingScheduledTaskQueue(Criteria recipientCriteria, Message me newAddedScheduledTasks.getRecord().setMapField(partitionId, task.getRecord().getSimpleFields()); _logger.info("Scheduling for controllerMsg " + controllerMsgId + " , sending task " - + partitionId + " " + task.getMsgId() + " to " + instanceName); + + partitionId + " " + task.getMsgIdString() + " to " + instanceName); if (_logger.isDebugEnabled()) { _logger.debug(task.getRecord().getSimpleFields()); @@ -223,16 +223,16 @@ void handleMessageUsingScheduledTaskQueue(Criteria recipientCriteria, Message me ZNRecord statusUpdate = accessor.getProperty( keyBuilder.controllerTaskStatus(MessageType.SCHEDULER_MSG.toString(), - _message.getMsgId())).getRecord(); + _message.getMsgIdString())).getRecord(); statusUpdate.getMapFields().put("SentMessageCount", sendSummary); accessor.updateProperty(keyBuilder.controllerTaskStatus(MessageType.SCHEDULER_MSG.toString(), - _message.getMsgId()), new StatusUpdate(statusUpdate)); + _message.getMsgIdString()), new StatusUpdate(statusUpdate)); } private int findTopPartitionId(IdealState currentTaskQueue) { int topId = 0; - for (String partitionName : currentTaskQueue.getPartitionSet()) { + for (String partitionName : currentTaskQueue.getPartitionStringSet()) { try { String partitionNumStr = partitionName.substring(partitionName.lastIndexOf('_') + 1); int num = Integer.parseInt(partitionNumStr); @@ -251,7 +251,7 @@ public HelixTaskResult handleMessage() throws InterruptedException { String type = _message.getMsgType(); HelixTaskResult result = new HelixTaskResult(); if (!type.equals(MessageType.SCHEDULER_MSG.toString())) { - throw new HelixException("Unexpected msg type for message " + _message.getMsgId() + throw new HelixException("Unexpected msg type for message " + _message.getMsgIdString() + " type:" + _message.getMsgType()); } // Parse timeout value @@ -298,11 +298,11 @@ public HelixTaskResult handleMessage() throws InterruptedException 
{ if (InstanceType.PARTICIPANT == recipientCriteria.getRecipientInstanceType() && hasSchedulerTaskQueue) { handleMessageUsingScheduledTaskQueue(recipientCriteria, messageTemplate, - _message.getMsgId()); + _message.getMsgIdString()); result.setSuccess(true); - result.getTaskResultMap().put(SCHEDULER_MSG_ID, _message.getMsgId()); + result.getTaskResultMap().put(SCHEDULER_MSG_ID, _message.getMsgIdString()); result.getTaskResultMap().put("ControllerResult", - "msg " + _message.getMsgId() + " from " + _message.getMsgSrc() + " processed"); + "msg " + _message.getMsgIdString() + " from " + _message.getMsgSrc() + " processed"); return result; } @@ -328,23 +328,23 @@ public HelixTaskResult handleMessage() throws InterruptedException { ZNRecord statusUpdate = accessor.getProperty( keyBuilder.controllerTaskStatus(MessageType.SCHEDULER_MSG.toString(), - _message.getMsgId())).getRecord(); + _message.getMsgIdString())).getRecord(); statusUpdate.getMapFields().put("SentMessageCount", sendSummary); accessor.setProperty(keyBuilder.controllerTaskStatus(MessageType.SCHEDULER_MSG.toString(), - _message.getMsgId()), new StatusUpdate(statusUpdate)); + _message.getMsgIdString()), new StatusUpdate(statusUpdate)); result.getTaskResultMap().put("ControllerResult", - "msg " + _message.getMsgId() + " from " + _message.getMsgSrc() + " processed"); - result.getTaskResultMap().put(SCHEDULER_MSG_ID, _message.getMsgId()); + "msg " + _message.getMsgIdString() + " from " + _message.getMsgSrc() + " processed"); + result.getTaskResultMap().put(SCHEDULER_MSG_ID, _message.getMsgIdString()); result.setSuccess(true); return result; } @Override public void onError(Exception e, ErrorCode code, ErrorType type) { - _logger.error("Message handling pipeline get an exception. MsgId:" + _message.getMsgId(), e); + _logger.error("Message handling pipeline get an exception. 
MsgId:" + _message.getMsgIdString(), e); } } } diff --git a/helix-core/src/main/java/org/apache/helix/manager/zk/DistributedControllerManager.java b/helix-core/src/main/java/org/apache/helix/manager/zk/DistributedControllerManager.java index c9ad0f3b72..71a5c4a84e 100644 --- a/helix-core/src/main/java/org/apache/helix/manager/zk/DistributedControllerManager.java +++ b/helix-core/src/main/java/org/apache/helix/manager/zk/DistributedControllerManager.java @@ -175,7 +175,7 @@ public boolean isLeader() { LiveInstance leader = _dataAccessor.getProperty(_keyBuilder.controllerLeader()); if (leader != null) { String leaderName = leader.getInstanceName(); - String sessionId = leader.getSessionId(); + String sessionId = leader.getSessionIdString(); if (leaderName != null && leaderName.equals(_instanceName) && sessionId != null && sessionId.equals(_sessionId)) { return true; diff --git a/helix-core/src/main/java/org/apache/helix/manager/zk/DistributedLeaderElection.java b/helix-core/src/main/java/org/apache/helix/manager/zk/DistributedLeaderElection.java index 0ab83421d1..af33b03667 100644 --- a/helix-core/src/main/java/org/apache/helix/manager/zk/DistributedLeaderElection.java +++ b/helix-core/src/main/java/org/apache/helix/manager/zk/DistributedLeaderElection.java @@ -138,7 +138,7 @@ private boolean tryUpdateController(HelixManager manager) { leader = accessor.getProperty(keyBuilder.controllerLeader()); if (leader != null) { - String leaderSessionId = leader.getSessionId(); + String leaderSessionId = leader.getSessionIdString(); LOG.info("Leader exists for cluster: " + manager.getClusterName() + ", currentLeader: " + leader.getInstanceName() + ", leaderSessionId: " + leaderSessionId); diff --git a/helix-core/src/main/java/org/apache/helix/manager/zk/ParticipantManagerHelper.java b/helix-core/src/main/java/org/apache/helix/manager/zk/ParticipantManagerHelper.java index 70dd592f44..ff932a9dd6 100644 --- 
a/helix-core/src/main/java/org/apache/helix/manager/zk/ParticipantManagerHelper.java +++ b/helix-core/src/main/java/org/apache/helix/manager/zk/ParticipantManagerHelper.java @@ -151,14 +151,14 @@ public void createLiveInstance() { * update sessionId field in live-instance if necessary */ LiveInstance curLiveInstance = new LiveInstance(record); - if (!curLiveInstance.getSessionId().equals(_sessionId)) { + if (!curLiveInstance.getSessionIdString().equals(_sessionId)) { /** * in last handle-new-session, * live-instance is created by new zkconnection with stale session-id inside * just update session-id field */ LOG.info("overwriting session-id by ephemeralOwner: " + ephemeralOwner - + ", old-sessionId: " + curLiveInstance.getSessionId() + ", new-sessionId: " + + ", old-sessionId: " + curLiveInstance.getSessionIdString() + ", new-sessionId: " + _sessionId); curLiveInstance.setSessionId(_sessionId); @@ -231,7 +231,7 @@ public void carryOverPreviousCurrentState() { _keyBuilder.currentState(_instanceName, _sessionId, lastCurState.getResourceName()) .getPath(); _dataAccessor.getBaseDataAccessor().update(curStatePath, - new CurStateCarryOverUpdater(_sessionId, stateModel.getInitialState(), lastCurState), + new CurStateCarryOverUpdater(_sessionId, stateModel.getInitialStateString(), lastCurState), AccessOption.PERSISTENT); } } diff --git a/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixAdmin.java b/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixAdmin.java index 754df7be0b..8172c61050 100644 --- a/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixAdmin.java +++ b/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixAdmin.java @@ -326,7 +326,7 @@ public void resetPartition(String clusterName, String instanceName, String resou } // check partition is in ERROR state - String sessionId = liveInstance.getSessionId(); + String sessionId = liveInstance.getSessionIdString(); CurrentState curState = 
accessor.getProperty(keyBuilder.currentState(instanceName, sessionId, resourceName)); for (String partitionName : resetPartitionNames) { @@ -348,7 +348,7 @@ public void resetPartition(String clusterName, String instanceName, String resou List messages = accessor.getChildValues(keyBuilder.messages(instanceName)); for (Message message : messages) { if (!MessageType.STATE_TRANSITION.toString().equalsIgnoreCase(message.getMsgType()) - || !sessionId.equals(message.getTgtSessionId()) + || !sessionId.equals(message.getTgtSessionIdString()) || !resourceName.equals(message.getResourceName()) || !resetPartitionNames.contains(message.getPartitionName())) { continue; @@ -381,7 +381,7 @@ public void resetPartition(String clusterName, String instanceName, String resou message.setTgtSessionId(sessionId); message.setStateModelDef(stateModelDef); message.setFromState(HelixDefinedState.ERROR.toString()); - message.setToState(stateModel.getInitialState()); + message.setToState(stateModel.getInitialStateString()); message.setStateModelFactoryName(idealState.getStateModelFactoryName()); resetMessages.add(message); @@ -986,7 +986,7 @@ void rebalance(String clusterName, String resourceName, int replica, String keyP } // StateModelDefinition def = new StateModelDefinition(stateModDef); - List statePriorityList = stateModDef.getStatesPriorityList(); + List statePriorityList = stateModDef.getStatesPriorityStringList(); String masterStateValue = null; String slaveStateValue = null; @@ -1144,7 +1144,7 @@ public ClusterConstraints getConstraints(String clusterName, ConstraintType cons @Override public void rebalance(String clusterName, IdealState currentIdealState, List instanceNames) { Set activeInstances = new HashSet(); - for (String partition : currentIdealState.getPartitionSet()) { + for (String partition : currentIdealState.getPartitionStringSet()) { activeInstances.addAll(currentIdealState.getRecord().getListField(partition)); } instanceNames.removeAll(activeInstances); diff --git 
a/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixDataAccessor.java b/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixDataAccessor.java index 025402dcb8..087d2fb4f2 100644 --- a/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixDataAccessor.java +++ b/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixDataAccessor.java @@ -76,7 +76,7 @@ public boolean createProperty(PropertyKey key, T value PropertyType type = key.getType(); String path = key.getPath(); int options = constructOptions(type); - return _baseDataAccessor.create(path, value.getRecord(), options); + return _baseDataAccessor.create(path, value == null ? null : value.getRecord(), options); } @Override diff --git a/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixManager.java b/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixManager.java index 621c18b47e..715d683304 100644 --- a/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixManager.java +++ b/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixManager.java @@ -839,7 +839,7 @@ private void carryOverPreviousCurrentState() { keyBuilder.currentState(_instanceName, _sessionId, lastCurState.getResourceName()) .getPath(); _helixAccessor.getBaseDataAccessor().update(curStatePath, - new CurStateCarryOverUpdater(_sessionId, stateModel.getInitialState(), lastCurState), + new CurStateCarryOverUpdater(_sessionId, stateModel.getInitialStateString(), lastCurState), AccessOption.PERSISTENT); } } diff --git a/helix-core/src/main/java/org/apache/helix/messaging/AsyncCallback.java b/helix-core/src/main/java/org/apache/helix/messaging/AsyncCallback.java index f9743a42b6..0f031dc05e 100644 --- a/helix-core/src/main/java/org/apache/helix/messaging/AsyncCallback.java +++ b/helix-core/src/main/java/org/apache/helix/messaging/AsyncCallback.java @@ -71,7 +71,7 @@ public void setInterrupted(boolean b) { } public synchronized final void onReply(Message message) { - _logger.info("OnReply msg " + 
message.getMsgId()); + _logger.info("OnReply msg " + message.getMsgIdString()); if (!isDone()) { _messageReplied.add(message); try { diff --git a/helix-core/src/main/java/org/apache/helix/messaging/DefaultMessagingService.java b/helix-core/src/main/java/org/apache/helix/messaging/DefaultMessagingService.java index 2eec354244..c22abb8b39 100644 --- a/helix-core/src/main/java/org/apache/helix/messaging/DefaultMessagingService.java +++ b/helix-core/src/main/java/org/apache/helix/messaging/DefaultMessagingService.java @@ -157,7 +157,7 @@ public Map> generateMessage(final Criteria recipient List liveInstances = accessor.getChildValues(keyBuilder.liveInstances()); for (LiveInstance liveInstance : liveInstances) { - sessionIdMap.put(liveInstance.getInstanceName(), liveInstance.getSessionId()); + sessionIdMap.put(liveInstance.getInstanceName(), liveInstance.getSessionIdString()); } } for (Map map : matchedList) { diff --git a/helix-core/src/main/java/org/apache/helix/messaging/handling/AsyncCallbackService.java b/helix-core/src/main/java/org/apache/helix/messaging/handling/AsyncCallbackService.java index c218a159e6..8375420290 100644 --- a/helix-core/src/main/java/org/apache/helix/messaging/handling/AsyncCallbackService.java +++ b/helix-core/src/main/java/org/apache/helix/messaging/handling/AsyncCallbackService.java @@ -56,14 +56,14 @@ public void registerAsyncCallback(String correlationId, AsyncCallback callback) void verifyMessage(Message message) { if (!message.getMsgType().toString().equalsIgnoreCase(MessageType.TASK_REPLY.toString())) { String errorMsg = - "Unexpected msg type for message " + message.getMsgId() + " type:" + message.getMsgType() + "Unexpected msg type for message " + message.getMsgIdString() + " type:" + message.getMsgType() + " Expected : " + MessageType.TASK_REPLY; _logger.error(errorMsg); throw new HelixException(errorMsg); } String correlationId = message.getCorrelationId(); if (correlationId == null) { - String errorMsg = "Message " + 
message.getMsgId() + " does not have correlation id"; + String errorMsg = "Message " + message.getMsgIdString() + " does not have correlation id"; _logger.error(errorMsg); throw new HelixException(errorMsg); } @@ -71,13 +71,13 @@ void verifyMessage(Message message) { if (!_callbackMap.containsKey(correlationId)) { String errorMsg = "Message " - + message.getMsgId() + + message.getMsgIdString() + " does not have correponding callback. Probably timed out already. Correlation id: " + correlationId; _logger.error(errorMsg); throw new HelixException(errorMsg); } - _logger.info("Verified reply message " + message.getMsgId() + " correlation:" + correlationId); + _logger.info("Verified reply message " + message.getMsgIdString() + " correlation:" + correlationId); } @Override @@ -110,7 +110,7 @@ public HelixTaskResult handleMessage() throws InterruptedException { verifyMessage(_message); HelixTaskResult result = new HelixTaskResult(); assert (_correlationId.equalsIgnoreCase(_message.getCorrelationId())); - _logger.info("invoking reply message " + _message.getMsgId() + ", correlationid:" + _logger.info("invoking reply message " + _message.getMsgIdString() + ", correlationid:" + _correlationId); AsyncCallback callback = _callbackMap.get(_correlationId); @@ -127,7 +127,7 @@ public HelixTaskResult handleMessage() throws InterruptedException { @Override public void onError(Exception e, ErrorCode code, ErrorType type) { - _logger.error("Message handling pipeline get an exception. MsgId:" + _message.getMsgId(), e); + _logger.error("Message handling pipeline get an exception. 
MsgId:" + _message.getMsgIdString(), e); } } } diff --git a/helix-core/src/main/java/org/apache/helix/messaging/handling/HelixStateTransitionHandler.java b/helix-core/src/main/java/org/apache/helix/messaging/handling/HelixStateTransitionHandler.java index 627babcb11..4c634b9b89 100644 --- a/helix-core/src/main/java/org/apache/helix/messaging/handling/HelixStateTransitionHandler.java +++ b/helix-core/src/main/java/org/apache/helix/messaging/handling/HelixStateTransitionHandler.java @@ -93,7 +93,7 @@ void preHandleMessage() throws Exception { HelixDataAccessor accessor = _manager.getHelixDataAccessor(); String partitionName = _message.getPartitionName(); - String fromState = _message.getFromState(); + String fromState = _message.getFromStateString(); // Verify the fromState and current state of the stateModel String state = _currentStateDelta.getState(partitionName); @@ -119,7 +119,7 @@ void postHandleMessage() { String partitionKey = _message.getPartitionName(); String resource = _message.getResourceName(); - String sessionId = _message.getTgtSessionId(); + String sessionId = _message.getTgtSessionIdString(); String instanceName = _manager.getInstanceName(); HelixDataAccessor accessor = _manager.getHelixDataAccessor(); @@ -132,15 +132,15 @@ void postHandleMessage() { // new session // sessionId might change when we update the state model state. // for zk current state it is OK as we have the per-session current state node - if (!_message.getTgtSessionId().equals(_manager.getSessionId())) { + if (!_message.getTgtSessionIdString().equals(_manager.getSessionId())) { logger.warn("Session id has changed. Skip postExecutionMessage. 
Old session " - + _message.getExecutionSessionId() + " , new session : " + _manager.getSessionId()); + + _message.getExecutionSessionIdString() + " , new session : " + _manager.getSessionId()); return; } if (taskResult.isSuccess()) { // String fromState = message.getFromState(); - String toState = _message.getToState(); + String toState = _message.getToStateString(); _currentStateDelta.setState(partitionKey, toState); if (toState.equalsIgnoreCase(HelixDefinedState.DROPPED.toString())) { @@ -178,7 +178,7 @@ void postHandleMessage() { // state in this case logger .error("State transition interrupted but not timeout. Not updating state. Partition : " - + _message.getPartitionName() + " MsgId : " + _message.getMsgId()); + + _message.getPartitionName() + " MsgId : " + _message.getMsgIdString()); return; } } @@ -187,7 +187,7 @@ void postHandleMessage() { _stateModel.updateState(HelixDefinedState.ERROR.toString()); // if we have errors transit from ERROR state, disable the partition - if (_message.getFromState().equalsIgnoreCase(HelixDefinedState.ERROR.toString())) { + if (_message.getFromStateString().equalsIgnoreCase(HelixDefinedState.ERROR.toString())) { disablePartition(); } } @@ -226,7 +226,7 @@ void disablePartition() { HelixAdmin admin = _manager.getClusterManagmentTool(); admin.enablePartition(false, clusterName, instanceName, resourceName, Arrays.asList(partitionName)); - logger.info("error in transit from ERROR to " + _message.getToState() + " for partition: " + logger.info("error in transit from ERROR to " + _message.getToStateString() + " for partition: " + partitionName + ". 
disable it on " + instanceName); } @@ -285,8 +285,8 @@ private void invoke(HelixDataAccessor accessor, NotificationContext context, // by default, we invoke state transition function in state model Method methodToInvoke = null; - String fromState = message.getFromState(); - String toState = message.getToState(); + String fromState = message.getFromStateString(); + String toState = message.getToStateString(); methodToInvoke = _transitionMethodFinder.getMethodForTransition(_stateModel.getClass(), fromState, toState, new Class[] { @@ -332,11 +332,11 @@ public void onError(Exception e, ErrorCode code, ErrorType type) { _stateModel.updateState(HelixDefinedState.ERROR.toString()); // if transit from ERROR state, disable the partition - if (_message.getFromState().equalsIgnoreCase(HelixDefinedState.ERROR.toString())) { + if (_message.getFromStateString().equalsIgnoreCase(HelixDefinedState.ERROR.toString())) { disablePartition(); } accessor.updateProperty( - keyBuilder.currentState(instanceName, _message.getTgtSessionId(), resourceName), + keyBuilder.currentState(instanceName, _message.getTgtSessionIdString(), resourceName), currentStateDelta); } } finally { diff --git a/helix-core/src/main/java/org/apache/helix/messaging/handling/HelixTask.java b/helix-core/src/main/java/org/apache/helix/messaging/handling/HelixTask.java index d9f7ae23f1..dd8612e46f 100644 --- a/helix-core/src/main/java/org/apache/helix/messaging/handling/HelixTask.java +++ b/helix-core/src/main/java/org/apache/helix/messaging/handling/HelixTask.java @@ -98,14 +98,14 @@ public HelixTaskResult call() { _statusUpdateUtil.logError(_message, HelixTask.class, e, "State transition interrupted, timeout:" + _isTimeout, accessor); - logger.info("Message " + _message.getMsgId() + " is interrupted"); + logger.info("Message " + _message.getMsgIdString() + " is interrupted"); } catch (Exception e) { taskResult = new HelixTaskResult(); taskResult.setException(e); taskResult.setMessage(e.getMessage()); String 
errorMessage = - "Exception while executing a message. " + e + " msgId: " + _message.getMsgId() + "Exception while executing a message. " + e + " msgId: " + _message.getMsgIdString() + " type: " + _message.getMsgType(); logger.error(errorMessage, e); _statusUpdateUtil.logError(_message, HelixTask.class, e, errorMessage, accessor); @@ -119,17 +119,17 @@ public HelixTaskResult call() { if (taskResult.isSuccess()) { _statusUpdateUtil.logInfo(_message, _handler.getClass(), "Message handling task completed successfully", accessor); - logger.info("Message " + _message.getMsgId() + " completed."); + logger.info("Message " + _message.getMsgIdString() + " completed."); } else { type = ErrorType.INTERNAL; if (taskResult.isInterrupted()) { - logger.info("Message " + _message.getMsgId() + " is interrupted"); + logger.info("Message " + _message.getMsgIdString() + " is interrupted"); code = _isTimeout ? ErrorCode.TIMEOUT : ErrorCode.CANCEL; if (_isTimeout) { int retryCount = _message.getRetryCount(); logger.info("Message timeout, retry count: " + retryCount + " msgId:" - + _message.getMsgId()); + + _message.getMsgIdString()); _statusUpdateUtil.logInfo(_message, _handler.getClass(), "Message handling task timeout, retryCount:" + retryCount, accessor); // Notify the handler that timeout happens, and the number of retries left @@ -166,12 +166,12 @@ public HelixTaskResult call() { code = ErrorCode.ERROR; String errorMessage = - "Exception after executing a message, msgId: " + _message.getMsgId() + e; + "Exception after executing a message, msgId: " + _message.getMsgIdString() + e; logger.error(errorMessage, e); _statusUpdateUtil.logError(_message, HelixTask.class, errorMessage, accessor); } finally { long end = System.currentTimeMillis(); - logger.info("msg: " + _message.getMsgId() + " handling task completed, results:" + logger.info("msg: " + _message.getMsgIdString() + " handling task completed, results:" + taskResult.isSuccess() + ", at: " + end + ", took:" + (end - start)); // 
Notify the handler about any error happened in the handling procedure, so that @@ -190,9 +190,9 @@ private void removeMessageFromZk(HelixDataAccessor accessor, Message message) { Builder keyBuilder = accessor.keyBuilder(); if (message.getTgtName().equalsIgnoreCase("controller")) { // TODO: removeProperty returns boolean - accessor.removeProperty(keyBuilder.controllerMessage(message.getMsgId())); + accessor.removeProperty(keyBuilder.controllerMessage(message.getMsgIdString())); } else { - accessor.removeProperty(keyBuilder.message(_manager.getInstanceName(), message.getMsgId())); + accessor.removeProperty(keyBuilder.message(_manager.getInstanceName(), message.getMsgIdString())); } } @@ -214,11 +214,11 @@ private void sendReply(HelixDataAccessor accessor, Message message, HelixTaskRes if (message.getSrcInstanceType() == InstanceType.PARTICIPANT) { Builder keyBuilder = accessor.keyBuilder(); - accessor.setProperty(keyBuilder.message(message.getMsgSrc(), replyMessage.getMsgId()), + accessor.setProperty(keyBuilder.message(message.getMsgSrc(), replyMessage.getMsgIdString()), replyMessage); } else if (message.getSrcInstanceType() == InstanceType.CONTROLLER) { Builder keyBuilder = accessor.keyBuilder(); - accessor.setProperty(keyBuilder.controllerMessage(replyMessage.getMsgId()), replyMessage); + accessor.setProperty(keyBuilder.controllerMessage(replyMessage.getMsgIdString()), replyMessage); } _statusUpdateUtil.logInfo(message, HelixTask.class, "1 msg replied to " + replyMessage.getTgtName(), accessor); @@ -237,8 +237,8 @@ private void reportMessageStat(HelixManager manager, Message message, HelixTaskR long totalDelay = now - msgReadTime; long executionDelay = now - msgExecutionStartTime; if (totalDelay > 0 && executionDelay > 0) { - String fromState = message.getFromState(); - String toState = message.getToState(); + String fromState = message.getFromStateString(); + String toState = message.getToStateString(); String transition = fromState + "--" + toState; 
StateTransitionContext cxt = diff --git a/helix-core/src/main/java/org/apache/helix/messaging/handling/HelixTaskExecutor.java b/helix-core/src/main/java/org/apache/helix/messaging/handling/HelixTaskExecutor.java index 600a3ab9ca..276c27f455 100644 --- a/helix-core/src/main/java/org/apache/helix/messaging/handling/HelixTaskExecutor.java +++ b/helix-core/src/main/java/org/apache/helix/messaging/handling/HelixTaskExecutor.java @@ -430,14 +430,14 @@ public void onMessage(String instanceName, List messages, continue; } - String tgtSessionId = message.getTgtSessionId(); + String tgtSessionId = message.getTgtSessionIdString(); // sessionId mismatch normally means message comes from expired session, just remove it if (!sessionId.equals(tgtSessionId) && !tgtSessionId.equals("*")) { String warningMessage = "SessionId does NOT match. expected sessionId: " + sessionId + ", tgtSessionId in message: " + tgtSessionId + ", messageId: " - + message.getMsgId(); + + message.getMsgIdString(); LOG.warn(warningMessage); accessor.removeProperty(message.getKey(keyBuilder, instanceName)); _statusUpdateUtil.logWarning(message, HelixStateMachineEngine.class, warningMessage, @@ -452,7 +452,7 @@ public void onMessage(String instanceName, List messages, // We will read the message again if there is a new message but we // check for the status and ignore if its already read if (LOG.isTraceEnabled()) { - LOG.trace("Message already read. msgId: " + message.getMsgId()); + LOG.trace("Message already read. 
msgId: " + message.getMsgIdString()); } continue; } @@ -465,9 +465,9 @@ public void onMessage(String instanceName, List messages, } handlers.add(createHandler); } catch (Exception e) { - LOG.error("Failed to create message handler for " + message.getMsgId(), e); + LOG.error("Failed to create message handler for " + message.getMsgIdString(), e); String error = - "Failed to create message handler for " + message.getMsgId() + ", exception: " + e; + "Failed to create message handler for " + message.getMsgIdString() + ", exception: " + e; _statusUpdateUtil.logError(message, HelixStateMachineEngine.class, e, error, accessor); @@ -543,7 +543,7 @@ public MessageHandler createMessageHandler(Message message, NotificationContext // the corresponding MessageHandlerFactory is registered if (handlerFactory == null) { LOG.warn("Fail to find message handler factory for type: " + msgType + " msgId: " - + message.getMsgId()); + + message.getMsgIdString()); return null; } diff --git a/helix-core/src/main/java/org/apache/helix/messaging/handling/MessageTimeoutTask.java b/helix-core/src/main/java/org/apache/helix/messaging/handling/MessageTimeoutTask.java index e1b4f0f964..17f525efd3 100644 --- a/helix-core/src/main/java/org/apache/helix/messaging/handling/MessageTimeoutTask.java +++ b/helix-core/src/main/java/org/apache/helix/messaging/handling/MessageTimeoutTask.java @@ -40,7 +40,7 @@ public void run() { Message message = _task.getMessage(); // NotificationContext context = _task.getNotificationContext(); // System.out.println("msg: " + message.getMsgId() + " timeouot."); - LOG.warn("Message time out, canceling. id:" + message.getMsgId() + " timeout : " + LOG.warn("Message time out, canceling. 
id:" + message.getMsgIdString() + " timeout : " + message.getExecutionTimeout()); _task.onTimeout(); _executor.cancelTask(_task); diff --git a/helix-core/src/main/java/org/apache/helix/model/ClusterConstraints.java b/helix-core/src/main/java/org/apache/helix/model/ClusterConstraints.java index f69a7ce1ae..3b7557d142 100644 --- a/helix-core/src/main/java/org/apache/helix/model/ClusterConstraints.java +++ b/helix-core/src/main/java/org/apache/helix/model/ClusterConstraints.java @@ -164,8 +164,8 @@ public static Map toConstraintAttributes(Message ms String msgType = msg.getMsgType(); attributes.put(ConstraintAttribute.MESSAGE_TYPE, msgType); if (MessageType.STATE_TRANSITION.toString().equals(msgType)) { - if (msg.getFromState() != null && msg.getToState() != null) { - attributes.put(ConstraintAttribute.TRANSITION, msg.getFromState() + "-" + msg.getToState()); + if (msg.getFromStateString() != null && msg.getToStateString() != null) { + attributes.put(ConstraintAttribute.TRANSITION, msg.getFromStateString() + "-" + msg.getToStateString()); } if (msg.getResourceName() != null) { attributes.put(ConstraintAttribute.RESOURCE, msg.getResourceName()); diff --git a/helix-core/src/main/java/org/apache/helix/model/CurrentState.java b/helix-core/src/main/java/org/apache/helix/model/CurrentState.java index 32854ab717..dba439b3b3 100644 --- a/helix-core/src/main/java/org/apache/helix/model/CurrentState.java +++ b/helix-core/src/main/java/org/apache/helix/model/CurrentState.java @@ -25,6 +25,12 @@ import org.apache.helix.HelixProperty; import org.apache.helix.ZNRecord; +import org.apache.helix.api.Id; +import org.apache.helix.api.PartitionId; +import org.apache.helix.api.ResourceId; +import org.apache.helix.api.SessionId; +import org.apache.helix.api.State; +import org.apache.helix.api.StateModelDefId; import org.apache.log4j.Logger; /** @@ -69,11 +75,19 @@ public String getResourceName() { return _record.getId(); } + /** + * Get the resource id + * @return ResourceId + */ + public 
ResourceId getResourceId() { + return Id.resource(getResourceName()); + } + /** * Get the partitions on this instance and the state that each partition is currently in. * @return (partition, state) pairs */ - public Map getPartitionStateMap() { + public Map getPartitionStateStringMap() { Map map = new HashMap(); Map> mapFields = _record.getMapFields(); for (String partitionName : mapFields.keySet()) { @@ -85,14 +99,38 @@ public Map getPartitionStateMap() { return map; } + /** + * Get the partitions on this instance and the state that each partition is currently in + * @return (partition id, state) pairs + */ + public Map getPartitionStateMap() { + Map map = new HashMap(); + for (String partitionName : _record.getMapFields().keySet()) { + Map stateMap = _record.getMapField(partitionName); + if (stateMap != null) { + map.put(Id.partition(partitionName), + State.from(stateMap.get(CurrentStateProperty.CURRENT_STATE.toString()))); + } + } + return map; + } + /** * Get the session that this current state corresponds to * @return String session identifier */ - public String getSessionId() { + public String getSessionIdString() { return _record.getSimpleField(CurrentStateProperty.SESSION_ID.toString()); } + /** + * Get the session that this current state corresponds to + * @return session identifier + */ + public SessionId getSessionId() { + return Id.session(getSessionIdString()); + } + /** * Set the session that this current state corresponds to * @param sessionId String session identifier @@ -115,6 +153,15 @@ public String getState(String partitionName) { return null; } + /** + * Get the state of a partition on this instance + * @param partitionId partition id + * @return State + */ + public State getState(PartitionId partitionId) { + return State.from(getState(partitionId.stringify())); + } + /** * Set the state model that the resource follows * @param stateModelName an identifier of the state model @@ -131,6 +178,14 @@ public String getStateModelDefRef() { return 
_record.getSimpleField(CurrentStateProperty.STATE_MODEL_DEF.toString()); } + /** + * Get the state model that the resource follows + * @return an identifier of the state model + */ + public StateModelDefId getStateModelDefId() { + return Id.stateModelDef(getStateModelDefRef()); + } + /** * Set the state that a partition is currently in on this instance * @param partitionName the name of the partition @@ -195,7 +250,7 @@ public boolean isValid() { LOG.error("Current state does not contain state model ref. id:" + getResourceName()); return false; } - if (getSessionId() == null) { + if (getSessionIdString() == null) { LOG.error("CurrentState does not contain session id, id : " + getResourceName()); return false; } diff --git a/helix-core/src/main/java/org/apache/helix/model/ExternalView.java b/helix-core/src/main/java/org/apache/helix/model/ExternalView.java index d5f1afc7ae..c7ff8bb02d 100644 --- a/helix-core/src/main/java/org/apache/helix/model/ExternalView.java +++ b/helix-core/src/main/java/org/apache/helix/model/ExternalView.java @@ -25,6 +25,14 @@ import org.apache.helix.HelixProperty; import org.apache.helix.ZNRecord; +import org.apache.helix.api.Id; +import org.apache.helix.api.ParticipantId; +import org.apache.helix.api.PartitionId; +import org.apache.helix.api.ResourceId; +import org.apache.helix.api.State; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; /** * External view is an aggregation (across all instances) @@ -74,10 +82,22 @@ public void setStateMap(String partitionName, Map currentStateMa * Get all the partitions of the resource * @return a set of partition names */ - public Set getPartitionSet() { + public Set getPartitionStringSet() { return _record.getMapFields().keySet(); } + /** + * Get all the partitions of the resource + * @return a set of partition ids + */ + public Set getPartitionSet() { + ImmutableSet.Builder builder = new ImmutableSet.Builder(); + for (String partitionName : 
getPartitionStringSet()) { + builder.add(Id.partition(partitionName)); + } + return builder.build(); + } + /** * Get the instance and the state for each partition replica * @param partitionName the partition to look up @@ -87,6 +107,21 @@ public Map getStateMap(String partitionName) { return _record.getMapField(partitionName); } + /** + * Get the participant and the state for each partition replica + * @param partitionId the partition to look up + * @return (participant, state) pairs + */ + public Map getStateMap(PartitionId partitionId) { + Map rawStateMap = getStateMap(partitionId.stringify()); + ImmutableMap.Builder builder = + new ImmutableMap.Builder(); + for (String participantName : rawStateMap.keySet()) { + builder.put(Id.participant(participantName), State.from(rawStateMap.get(participantName))); + } + return builder.build(); + } + /** * Get the resource represented by this view * @return the name of the resource @@ -95,6 +130,14 @@ public String getResourceName() { return _record.getId(); } + /** + * Get the resource represented by this view + * @return resource id + */ + public ResourceId getResourceId() { + return Id.resource(getResourceName()); + } + @Override public boolean isValid() { return true; diff --git a/helix-core/src/main/java/org/apache/helix/model/IdealState.java b/helix-core/src/main/java/org/apache/helix/model/IdealState.java index e14940a17e..cbf7a64508 100644 --- a/helix-core/src/main/java/org/apache/helix/model/IdealState.java +++ b/helix-core/src/main/java/org/apache/helix/model/IdealState.java @@ -30,9 +30,20 @@ import org.apache.helix.HelixConstants; import org.apache.helix.HelixProperty; import org.apache.helix.ZNRecord; +import org.apache.helix.api.Id; +import org.apache.helix.api.ParticipantId; +import org.apache.helix.api.PartitionId; +import org.apache.helix.api.RebalancerRef; +import org.apache.helix.api.ResourceId; +import org.apache.helix.api.State; +import org.apache.helix.api.StateModelDefId; import 
org.apache.helix.controller.rebalancer.Rebalancer; import org.apache.log4j.Logger; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; + /** * The ideal states of all partitions in a resource */ @@ -106,6 +117,14 @@ public String getResourceName() { return _record.getId(); } + /** + * Get the associated resource + * @return the id of the resource + */ + public ResourceId getResourceId() { + return Id.resource(getResourceName()); + } + /** * Get the rebalance mode of the ideal state * @param mode {@link IdealStateModeProperty} @@ -153,6 +172,18 @@ public String getRebalancerClassName() { return _record.getSimpleField(IdealStateProperty.REBALANCER_CLASS_NAME.toString()); } + /** + * Get a reference to the user-defined rebalancer associated with this resource(if any) + * @return + */ + public RebalancerRef getRebalancerRef() { + String className = getRebalancerClassName(); + if (className != null) { + return RebalancerRef.from(getRebalancerClassName()); + } + return null; + } + /** * Set the maximum number of partitions of this resource that an instance can serve * @param max the maximum number of partitions supported @@ -204,7 +235,7 @@ public void setPartitionState(String partitionName, String instanceName, String * Get all of the partitions * @return a set of partition names */ - public Set getPartitionSet() { + public Set getPartitionStringSet() { if (getRebalanceMode() == RebalanceMode.SEMI_AUTO || getRebalanceMode() == RebalanceMode.FULL_AUTO) { return _record.getListFields().keySet(); @@ -217,6 +248,18 @@ public Set getPartitionSet() { } } + /** + * Get all of the partitions + * @return an immutable set of partitions + */ + public Set getPartitionSet() { + ImmutableSet.Builder partitionSetBuilder = new ImmutableSet.Builder(); + for (String partitionName : getPartitionStringSet()) { + partitionSetBuilder.add(Id.partition(partitionName)); + } + return 
partitionSetBuilder.build(); + } + /** * Set the current mapping of a partition * @param partition the partition to set @@ -235,6 +278,21 @@ public Map getInstanceStateMap(String partitionName) { return _record.getMapField(partitionName); } + /** + * Get the current mapping of a partition + * @param partitionId the name of the partition + * @return the instances where the replicas live and the state of each (immutable) + */ + public Map getParticipantStateMap(PartitionId partitionId) { + Map instanceStateMap = getInstanceStateMap(partitionId.stringify()); + ImmutableMap.Builder builder = + new ImmutableMap.Builder(); + for (String participantId : instanceStateMap.keySet()) { + builder.put(Id.participant(participantId), State.from(instanceStateMap.get(participantId))); + } + return builder.build(); + } + /** * Get the instances who host replicas of a partition * @param partitionName the partition to look up @@ -263,7 +321,19 @@ public Set getInstanceSet(String partitionName) { logger.error("Invalid ideal state mode: " + getResourceName()); return Collections.emptySet(); } + } + /** + * Get the participants who host replicas of a partition + * @param partitionId the partition to look up + * @return immutable set of participant ids + */ + public Set getParticipantSet(PartitionId partitionId) { + ImmutableSet.Builder builder = new ImmutableSet.Builder(); + for (String participantName : getInstanceSet(partitionId.stringify())) { + builder.add(Id.participant(participantName)); + } + return builder.build(); } /** @@ -290,6 +360,20 @@ public List getPreferenceList(String partitionName) { return null; } + /** + * Get the preference list of a partition + * @param partitionId the partition id + * @return an ordered list of participants that can serve replicas of the partition + */ + public List getPreferenceList(PartitionId partitionId) { + ImmutableList.Builder builder = new ImmutableList.Builder(); + List preferenceStringList = getPreferenceList(partitionId.stringify()); + 
for (String participantName : preferenceStringList) { + builder.add(Id.participant(participantName)); + } + return builder.build(); + } + /** * Get the state model associated with this resource * @return an identifier of the state model @@ -298,6 +382,14 @@ public String getStateModelDefRef() { return _record.getSimpleField(IdealStateProperty.STATE_MODEL_DEF_REF.toString()); } + /** + * Get the state model associated with this resource + * @return an identifier of the state model + */ + public StateModelDefId getStateModelDefId() { + return Id.stateModelDef(getStateModelDefRef()); + } + /** * Set the state model associated with this resource * @param stateModel state model identifier @@ -426,7 +518,7 @@ public boolean isValid() { if (!replicaStr.equals(HelixConstants.StateModelToken.ANY_LIVEINSTANCE.toString())) { int replica = Integer.parseInt(replicaStr); - Set partitionSet = getPartitionSet(); + Set partitionSet = getPartitionStringSet(); for (String partition : partitionSet) { List preferenceList = getPreferenceList(partition); if (preferenceList == null || preferenceList.size() != replica) { diff --git a/helix-core/src/main/java/org/apache/helix/model/InstanceConfig.java b/helix-core/src/main/java/org/apache/helix/model/InstanceConfig.java index eb1c652f94..29059653bb 100644 --- a/helix-core/src/main/java/org/apache/helix/model/InstanceConfig.java +++ b/helix-core/src/main/java/org/apache/helix/model/InstanceConfig.java @@ -27,6 +27,8 @@ import org.apache.helix.HelixProperty; import org.apache.helix.ZNRecord; +import org.apache.helix.api.Id; +import org.apache.helix.api.ParticipantId; import org.apache.log4j.Logger; /** @@ -238,6 +240,14 @@ public String getInstanceName() { return _record.getId(); } + /** + * Get the identifier of this participant + * @return the participant id + */ + public ParticipantId getParticipantId() { + return Id.participant(getInstanceName()); + } + @Override public boolean isValid() { // HELIX-65: remove check for hostname/port 
existence diff --git a/helix-core/src/main/java/org/apache/helix/model/LiveInstance.java b/helix-core/src/main/java/org/apache/helix/model/LiveInstance.java index 75e0cf398f..86eb2d806e 100644 --- a/helix-core/src/main/java/org/apache/helix/model/LiveInstance.java +++ b/helix-core/src/main/java/org/apache/helix/model/LiveInstance.java @@ -21,6 +21,11 @@ import org.apache.helix.HelixProperty; import org.apache.helix.ZNRecord; +import org.apache.helix.api.HelixVersion; +import org.apache.helix.api.Id; +import org.apache.helix.api.ParticipantId; +import org.apache.helix.api.ProcId; +import org.apache.helix.api.SessionId; import org.apache.log4j.Logger; /** @@ -67,10 +72,18 @@ public void setSessionId(String sessionId) { * Get the session that this instance corresponds to * @return session identifier */ - public String getSessionId() { + public String getSessionIdString() { return _record.getSimpleField(LiveInstanceProperty.SESSION_ID.toString()); } + /** + * Get the session that this participant corresponds to + * @return session identifier + */ + public SessionId getSessionId() { + return Id.session(getSessionIdString()); + } + /** * Get the name of this instance * @return the instance name @@ -79,14 +92,30 @@ public String getInstanceName() { return _record.getId(); } + /** + * Get the id of this participant + * @return participant id + */ + public ParticipantId getParticipantId() { + return Id.participant(getInstanceName()); + } + /** * Get the version of Helix that this instance is running * @return the version */ - public String getHelixVersion() { + public String getHelixVersionString() { return _record.getSimpleField(LiveInstanceProperty.HELIX_VERSION.toString()); } + /** + * Get the version of Helix that this participant is running + * @return the version + */ + public HelixVersion getHelixVersion() { + return HelixVersion.from(getHelixVersionString()); + } + /** * Set the version of Helix that this instance is running * @param helixVersion the version @@ 
-103,6 +132,14 @@ public String getLiveInstance() { return _record.getSimpleField(LiveInstanceProperty.LIVE_INSTANCE.toString()); } + /** + * Get an identifier that represents the instance and where it is located + * @return process identifier + */ + public ProcId getProcessId() { + return Id.process(getLiveInstance()); + } + /** * Set an identifier that represents the process * @param liveInstance process identifier, e.g. process_id@host @@ -137,11 +174,11 @@ public void setWebserviceUrl(String url) { @Override public boolean isValid() { - if (getSessionId() == null) { + if (getSessionIdString() == null) { _logger.error("liveInstance does not have session id. id:" + _record.getId()); return false; } - if (getHelixVersion() == null) { + if (getHelixVersionString() == null) { _logger.error("liveInstance does not have CLM verion. id:" + _record.getId()); return false; } diff --git a/helix-core/src/main/java/org/apache/helix/model/Message.java b/helix-core/src/main/java/org/apache/helix/model/Message.java index d599b8bf09..5bc4a46340 100644 --- a/helix-core/src/main/java/org/apache/helix/model/Message.java +++ b/helix-core/src/main/java/org/apache/helix/model/Message.java @@ -31,8 +31,17 @@ import org.apache.helix.HelixProperty; import org.apache.helix.InstanceType; import org.apache.helix.PropertyKey; -import org.apache.helix.ZNRecord; import org.apache.helix.PropertyKey.Builder; +import org.apache.helix.ZNRecord; +import org.apache.helix.api.Id; +import org.apache.helix.api.MessageId; +import org.apache.helix.api.PartitionId; +import org.apache.helix.api.ResourceId; +import org.apache.helix.api.SessionId; +import org.apache.helix.api.State; +import org.apache.helix.api.StateModelDefId; + +import com.google.common.collect.ImmutableList; /** * Messages sent internally among nodes in the system to respond to changes in state. 
@@ -196,10 +205,18 @@ public String getMsgType() { * Get the session identifier of the destination node * @return session identifier */ - public String getTgtSessionId() { + public String getTgtSessionIdString() { return _record.getSimpleField(Attributes.TGT_SESSION_ID.toString()); } + /** + * Get the session identifier of the destination node + * @return session identifier + */ + public SessionId getTgtSessionId() { + return Id.session(getTgtSessionIdString()); + } + /** * Set the session identifier of the destination node * @param tgtSessionId session identifier @@ -212,10 +229,18 @@ public void setTgtSessionId(String tgtSessionId) { * Get the session identifier of the source node * @return session identifier */ - public String getSrcSessionId() { + public String getSrcSessionIdString() { return _record.getSimpleField(Attributes.SRC_SESSION_ID.toString()); } + /** + * Get the session identifier of the source node + * @return session identifier + */ + public SessionId getSrcSessionId() { + return Id.session(getSrcSessionIdString()); + } + /** * Set the session identifier of the source node * @param srcSessionId session identifier @@ -228,10 +253,22 @@ public void setSrcSessionId(String srcSessionId) { * Get the session identifier of the node that executes the message * @return session identifier */ - public String getExecutionSessionId() { + public String getExecutionSessionIdString() { return _record.getSimpleField(Attributes.EXE_SESSION_ID.toString()); } + /** + * Get the session identifier of the node that executes the message + * @return session identifier + */ + public SessionId getExecutionSessionId() { + String sessionId = getExecutionSessionIdString(); + if (sessionId != null) { + return Id.session(sessionId); + } + return null; + } + /** * Set the session identifier of the node that executes the message * @param exeSessionId session identifier @@ -312,10 +349,18 @@ public void setPartitionName(String partitionName) { * Get the unique identifier of this 
message * @return message identifier */ - public String getMsgId() { + public String getMsgIdString() { return _record.getSimpleField(Attributes.MSG_ID.toString()); } + /** + * Get the unique identifier of this message + * @return message identifier + */ + public MessageId getMsgId() { + return Id.message(getMsgIdString()); + } + /** * Set the unique identifier of this message * @param msgId message identifier @@ -336,10 +381,18 @@ public void setFromState(String state) { * Get the "from-state" for transition-related messages * @return state name, or null for other message types */ - public String getFromState() { + public String getFromStateString() { return _record.getSimpleField(Attributes.FROM_STATE.toString()); } + /** + * Get the "from-state" for transition-related messages + * @return state, or null for other message types + */ + public State getFromState() { + return State.from(getFromStateString()); + } + /** * Set the "to state" for transition-related messages * @param state the state name @@ -352,10 +405,18 @@ public void setToState(String state) { * Get the "to state" for transition-related messages * @return state name, or null for other message types */ - public String getToState() { + public String getToStateString() { return _record.getSimpleField(Attributes.TO_STATE.toString()); } + /** + * Get the "to state" for transition-related messages + * @return state, or null for other message types + */ + public State getToState() { + return State.from(getToStateString()); + } + /** * Set the instance for which this message is targeted * @param msgTgt instance name @@ -396,6 +457,14 @@ public String getResourceName() { return _record.getSimpleField(Attributes.RESOURCE_NAME.toString()); } + /** + * Get the resource associated with this message + * @return resource id + */ + public ResourceId getResourceId() { + return Id.resource(getResourceName()); + } + /** * Get the resource partition associated with this message * @return partition name @@ -404,6 
+473,14 @@ public String getPartitionName() { return _record.getSimpleField(Attributes.PARTITION_NAME.toString()); } + /** + * Get the resource partition associated with this message + * @return partition id + */ + public PartitionId getPartitionId() { + return Id.partition(getPartitionName()); + } + /** * Get the state model definition name * @return a String reference to the state model definition, e.g. "MasterSlave" @@ -412,6 +489,14 @@ public String getStateModelDef() { return _record.getSimpleField(Attributes.STATE_MODEL_DEF.toString()); } + /** + * Get the state model definition id + * @return a reference to the state model definition + */ + public StateModelDefId getStateModelDefId() { + return Id.stateModelDef(getStateModelDef()); + } + /** * Set the state model definition name * @param stateModelDefName a reference to the state model definition, e.g. "MasterSlave" @@ -581,7 +666,7 @@ public String getAttribute(Attributes attr) { public static Message createReplyMessage(Message srcMessage, String instanceName, Map taskResultMap) { if (srcMessage.getCorrelationId() == null) { - throw new HelixException("Message " + srcMessage.getMsgId() + throw new HelixException("Message " + srcMessage.getMsgIdString() + " does not contain correlation id"); } Message replyMessage = new Message(MessageType.TASK_REPLY, UUID.randomUUID().toString()); @@ -626,6 +711,18 @@ public List getPartitionNames() { return partitionNames; } + /** + * Get a list of partitions associated with this message + * @return list of partition ids + */ + public List getPartitionIds() { + ImmutableList.Builder builder = new ImmutableList.Builder(); + for (String partitionName : getPartitionNames()) { + builder.add(Id.partition(partitionName)); + } + return builder.build(); + } + // public AtomicInteger getGroupMsgCountDown() // { // return _groupMsgCountDown; @@ -671,8 +768,8 @@ public boolean isValid() { boolean isNotValid = isNullOrEmpty(getTgtName()) || isNullOrEmpty(getPartitionName()) || 
isNullOrEmpty(getResourceName()) || isNullOrEmpty(getStateModelDef()) - || isNullOrEmpty(getToState()) || isNullOrEmpty(getStateModelFactoryName()) - || isNullOrEmpty(getFromState()); + || isNullOrEmpty(getToStateString()) || isNullOrEmpty(getStateModelFactoryName()) + || isNullOrEmpty(getFromStateString()); return !isNotValid; } diff --git a/helix-core/src/main/java/org/apache/helix/model/ResourceAssignment.java b/helix-core/src/main/java/org/apache/helix/model/ResourceAssignment.java index 2b3d14dfc6..7013829483 100644 --- a/helix-core/src/main/java/org/apache/helix/model/ResourceAssignment.java +++ b/helix-core/src/main/java/org/apache/helix/model/ResourceAssignment.java @@ -55,6 +55,14 @@ public ResourceAssignment(ResourceAssignment existingMapping) { super(existingMapping); } + /** + * Get the resource for which this assignment was created + * @return resource name + */ + public String getResourceName() { + return getId(); + } + /** * Get the currently mapped partitions * @return list of Partition objects diff --git a/helix-core/src/main/java/org/apache/helix/model/StateModelDefinition.java b/helix-core/src/main/java/org/apache/helix/model/StateModelDefinition.java index 7f08b6fc7c..caaf81e1b7 100644 --- a/helix-core/src/main/java/org/apache/helix/model/StateModelDefinition.java +++ b/helix-core/src/main/java/org/apache/helix/model/StateModelDefinition.java @@ -30,9 +30,12 @@ import org.apache.helix.HelixDefinedState; import org.apache.helix.HelixProperty; import org.apache.helix.ZNRecord; +import org.apache.helix.api.State; import org.apache.helix.model.builder.StateTransitionTableBuilder; import org.apache.log4j.Logger; +import com.google.common.collect.ImmutableList; + /** * Describe the state model */ @@ -141,18 +144,42 @@ void addDefaultTransition(String from, String to, String next) { * Get an ordered priority list of transitions * @return transitions in the form SRC-DEST, the first of which is highest priority */ - public List 
getStateTransitionPriorityList() { + public List getStateTransitionPriorityStringList() { return _stateTransitionPriorityList; } + /** + * Get an ordered priority list of transitions + * @return Transition objects, the first of which is highest priority + */ + public List getStateTransitionPriorityList() { + ImmutableList.Builder builder = new ImmutableList.Builder(); + for (String transition : getStateTransitionPriorityStringList()) { + builder.add(Transition.from(transition)); + } + return builder.build(); + } + /** * Get an ordered priority list of states * @return state names, the first of which is highest priority */ - public List getStatesPriorityList() { + public List getStatesPriorityStringList() { return _statesPriorityList; } + /** + * Get an ordered priority list of states + * @return immutable list of states, the first of which is highest priority + */ + public List getStatesPriorityList() { + ImmutableList.Builder builder = new ImmutableList.Builder(); + for (String state : getStatesPriorityStringList()) { + builder.add(State.from(state)); + } + return builder.build(); + } + /** * Get the intermediate state required to transition from one state to the other * @param fromState the source @@ -167,16 +194,40 @@ public String getNextStateForTransition(String fromState, String toState) { return null; } + /** + * Get the intermediate state required to transition from one state to the other + * @param fromState the source + * @param toState the destination + * @return the intermediate state, or null if not present + */ + public State getNextStateForTransition(State fromState, State toState) { + String next = getNextStateForTransition(fromState.toString(), toState.toString()); + if (next != null) { + return State.from(getNextStateForTransition(fromState.toString(), toState.toString())); + } + return null; + } + /** * Get the starting state in the model * @return name of the initial state */ - public String getInitialState() { + public String 
getInitialStateString() { // return _record.getSimpleField(StateModelDefinitionProperty.INITIAL_STATE // .toString()); return _initialState; } + /** + * Get the starting state in the model + * @return name of the initial state + */ + public State getInitialState() { + // return _record.getSimpleField(StateModelDefinitionProperty.INITIAL_STATE + // .toString()); + return State.from(_initialState); + } + /** * Number of instances that can be in each state * @param state the state name @@ -188,7 +239,7 @@ public String getNumInstancesPerState(String state) { @Override public boolean isValid() { - if (getInitialState() == null) { + if (getInitialStateString() == null) { _logger.error("State model does not contain init state, statemodel:" + _record.getId()); return false; } diff --git a/helix-core/src/main/java/org/apache/helix/model/Transition.java b/helix-core/src/main/java/org/apache/helix/model/Transition.java index 2151c44c7c..0e7f32f9a6 100644 --- a/helix-core/src/main/java/org/apache/helix/model/Transition.java +++ b/helix-core/src/main/java/org/apache/helix/model/Transition.java @@ -70,4 +70,14 @@ public String getToState() { return _toState; } + /** + * Create a new transition + * @param transition string representing a transition, e.g. 
"STATE1-STATE2" + * @return Transition + */ + public static Transition from(String transition) { + String fromState = transition.substring(0, transition.indexOf('-')); + String toState = transition.substring(transition.indexOf('-') + 1); + return new Transition(fromState, toState); + } } diff --git a/helix-core/src/main/java/org/apache/helix/model/builder/CurrentStateBuilder.java b/helix-core/src/main/java/org/apache/helix/model/builder/CurrentStateBuilder.java new file mode 100644 index 0000000000..26238f2d3c --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/model/builder/CurrentStateBuilder.java @@ -0,0 +1,123 @@ +package org.apache.helix.model.builder; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.helix.ZNRecord; +import org.apache.helix.api.PartitionId; +import org.apache.helix.api.ResourceId; +import org.apache.helix.api.SessionId; +import org.apache.helix.api.State; +import org.apache.helix.api.StateModelDefId; +import org.apache.helix.model.CurrentState; +import org.apache.helix.model.CurrentState.CurrentStateProperty; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +/** + * Assemble a CurrentState + */ +public class CurrentStateBuilder { + private final ResourceId _resourceId; + private final Map _partitionStateMap; + private SessionId _sessionId; + private StateModelDefId _stateModelDefId; + private String _stateModelFactoryName; + + /** + * Build a current state for a given resource + * @param resourceId resource identifier + */ + public CurrentStateBuilder(ResourceId resourceId) { + _resourceId = resourceId; + _partitionStateMap = new HashMap(); + } + + /** + * Add partition-state mappings for this instance and resource + * @param mappings map of partition to state + * @return CurrentStateBuilder + */ + public CurrentStateBuilder addMappings(Map mappings) { + _partitionStateMap.putAll(mappings); + return this; + } + + /** + * Add a single partition-state mapping for this instance and resource + * @param partitionId the partition to map + * @param state the replica state + * @return CurrentStateBuilder + */ + public CurrentStateBuilder addMapping(PartitionId partitionId, State state) { + _partitionStateMap.put(partitionId, state); + return this; + } + + /** + * Set the session id for this current state + * @param sessionId session identifier + * @return CurrentStateBuilder + */ + public CurrentStateBuilder sessionId(SessionId sessionId) { + _sessionId = sessionId; + return this; + } + + /** + * Set the state model for this current state + * @param stateModelDefId state model definition identifier + * @return CurrentStateBuilder + */ + public CurrentStateBuilder stateModelDef(StateModelDefId stateModelDefId) { + _stateModelDefId = stateModelDefId; + return this; + } + + /** + * Set the name of the state model factory + * @param stateModelFactoryName state model factory identifier + * @return CurrentStateBuilder + */ + public CurrentStateBuilder stateModelFactory(String stateModelFactoryName) { + _stateModelFactoryName = stateModelFactoryName; + return this; + } + + /** + * Create a CurrentState + * @return 
instantiated CurrentState + */ + public CurrentState build() { + ZNRecord record = new ZNRecord(_resourceId.stringify()); + for (PartitionId partitionId : _partitionStateMap.keySet()) { + Map stateMap = new HashMap(); + stateMap.put(CurrentStateProperty.CURRENT_STATE.toString(), + _partitionStateMap.get(partitionId).toString()); + record.setMapField(partitionId.toString(), stateMap); + } + record.setSimpleField(CurrentStateProperty.SESSION_ID.toString(), _sessionId.toString()); + record.setSimpleField(CurrentStateProperty.STATE_MODEL_DEF.toString(), + _stateModelDefId.toString()); + record.setSimpleField(CurrentStateProperty.STATE_MODEL_FACTORY_NAME.toString(), + _stateModelFactoryName); + return new CurrentState(record); + } +} diff --git a/helix-core/src/main/java/org/apache/helix/model/builder/IdealStateBuilder.java b/helix-core/src/main/java/org/apache/helix/model/builder/IdealStateBuilder.java index a7c0335f59..7ae9dfe14f 100644 --- a/helix-core/src/main/java/org/apache/helix/model/builder/IdealStateBuilder.java +++ b/helix-core/src/main/java/org/apache/helix/model/builder/IdealStateBuilder.java @@ -22,6 +22,8 @@ import org.apache.helix.HelixConstants; import org.apache.helix.HelixException; import org.apache.helix.ZNRecord; +import org.apache.helix.api.ResourceId; +import org.apache.helix.api.StateModelDefId; import org.apache.helix.model.IdealState; public abstract class IdealStateBuilder { @@ -73,6 +75,14 @@ public IdealStateBuilder(String resourceName) { _record = new ZNRecord(resourceName); } + /** + * Instantiate with a resource id + * @param resourceId the resource for which to build an ideal state + */ + public IdealStateBuilder(ResourceId resourceId) { + this(resourceId.stringify()); + } + /** * @param numReplica */ @@ -97,6 +107,15 @@ public IdealStateBuilder setStateModel(String stateModel) { return this; } + /** + * Set the state model definition to use with this ideal state + * @param stateModelDefId state model identifier + */ + public 
IdealStateBuilder setStateModelDefId(StateModelDefId stateModelDefId) { + this.stateModel = stateModelDefId.stringify(); + return this; + } + /** * @param stateModelFactoryName */ diff --git a/helix-core/src/main/java/org/apache/helix/monitoring/mbeans/ResourceMonitor.java b/helix-core/src/main/java/org/apache/helix/monitoring/mbeans/ResourceMonitor.java index e24b41f955..769ef2d438 100644 --- a/helix-core/src/main/java/org/apache/helix/monitoring/mbeans/ResourceMonitor.java +++ b/helix-core/src/main/java/org/apache/helix/monitoring/mbeans/ResourceMonitor.java @@ -111,7 +111,7 @@ public void updateExternalView(ExternalView externalView, IdealState idealState) } _numOfErrorPartitions = numOfErrorPartitions; _externalViewIdealStateDiff = numOfDiff; - _numOfPartitionsInExternalView = externalView.getPartitionSet().size(); + _numOfPartitionsInExternalView = externalView.getPartitionStringSet().size(); } @Override diff --git a/helix-core/src/main/java/org/apache/helix/participant/DistClusterControllerElection.java b/helix-core/src/main/java/org/apache/helix/participant/DistClusterControllerElection.java index 25aada2846..3551043dbb 100644 --- a/helix-core/src/main/java/org/apache/helix/participant/DistClusterControllerElection.java +++ b/helix-core/src/main/java/org/apache/helix/participant/DistClusterControllerElection.java @@ -142,7 +142,7 @@ private boolean tryUpdateController(HelixManager manager) { leader = accessor.getProperty(keyBuilder.controllerLeader()); if (leader != null) { - String leaderSessionId = leader.getSessionId(); + String leaderSessionId = leader.getSessionIdString(); LOG.info("Leader exists for cluster: " + manager.getClusterName() + ", currentLeader: " + leader.getInstanceName() + ", leaderSessionId: " + leaderSessionId); diff --git a/helix-core/src/main/java/org/apache/helix/participant/HelixStateMachineEngine.java b/helix-core/src/main/java/org/apache/helix/participant/HelixStateMachineEngine.java index 31fcecf4a8..5fd276f363 100644 --- 
a/helix-core/src/main/java/org/apache/helix/participant/HelixStateMachineEngine.java +++ b/helix-core/src/main/java/org/apache/helix/participant/HelixStateMachineEngine.java @@ -160,13 +160,13 @@ public MessageHandler createHandler(Message message, NotificationContext context if (!type.equals(MessageType.STATE_TRANSITION.toString())) { throw new HelixException("Expect state-transition message type, but was " - + message.getMsgType() + ", msgId: " + message.getMsgId()); + + message.getMsgType() + ", msgId: " + message.getMsgIdString()); } String partitionKey = message.getPartitionName(); String stateModelName = message.getStateModelDef(); String resourceName = message.getResourceName(); - String sessionId = message.getTgtSessionId(); + String sessionId = message.getTgtSessionIdString(); int bucketSize = message.getBucketSize(); if (stateModelName == null) { @@ -205,7 +205,7 @@ public MessageHandler createHandler(Message message, NotificationContext context if (message.getBatchMessageMode() == false) { // create currentStateDelta for this partition - String initState = _stateModelDefs.get(message.getStateModelDef()).getInitialState(); + String initState = _stateModelDefs.get(message.getStateModelDef()).getInitialStateString(); StateModel stateModel = stateModelFactory.getStateModel(partitionKey); if (stateModel == null) { stateModel = stateModelFactory.createAndAddStateModel(partitionKey); diff --git a/helix-core/src/main/java/org/apache/helix/spectator/RoutingTableProvider.java b/helix-core/src/main/java/org/apache/helix/spectator/RoutingTableProvider.java index 9bba660f68..ed411d19cc 100644 --- a/helix-core/src/main/java/org/apache/helix/spectator/RoutingTableProvider.java +++ b/helix-core/src/main/java/org/apache/helix/spectator/RoutingTableProvider.java @@ -133,7 +133,7 @@ private void refresh(List externalViewList, NotificationContext ch if (externalViewList != null) { for (ExternalView extView : externalViewList) { String resourceName = extView.getId(); - for 
(String partitionName : extView.getPartitionSet()) { + for (String partitionName : extView.getPartitionStringSet()) { Map stateMap = extView.getStateMap(partitionName); for (String instanceName : stateMap.keySet()) { String currentState = stateMap.get(instanceName); diff --git a/helix-core/src/main/java/org/apache/helix/tools/ZkLogAnalyzer.java b/helix-core/src/main/java/org/apache/helix/tools/ZkLogAnalyzer.java index 11e1b6681a..1cf824cc20 100644 --- a/helix-core/src/main/java/org/apache/helix/tools/ZkLogAnalyzer.java +++ b/helix-core/src/main/java/org/apache/helix/tools/ZkLogAnalyzer.java @@ -342,11 +342,11 @@ public boolean accept(File file) { // sendMessageLines.add(inputLine); stats.msgSentCount++; - if (msg.getFromState().equals("OFFLINE") && msg.getToState().equals("SLAVE")) { + if (msg.getFromStateString().equals("OFFLINE") && msg.getToStateString().equals("SLAVE")) { stats.msgSentCount_O2S++; - } else if (msg.getFromState().equals("SLAVE") && msg.getToState().equals("MASTER")) { + } else if (msg.getFromStateString().equals("SLAVE") && msg.getToStateString().equals("MASTER")) { stats.msgSentCount_S2M++; - } else if (msg.getFromState().equals("MASTER") && msg.getToState().equals("SLAVE")) { + } else if (msg.getFromStateString().equals("MASTER") && msg.getToStateString().equals("SLAVE")) { stats.msgSentCount_M2S++; } // System.out.println("Message create:"+new diff --git a/helix-core/src/main/java/org/apache/helix/util/RebalanceUtil.java b/helix-core/src/main/java/org/apache/helix/util/RebalanceUtil.java index 273adc323b..0911cc4461 100644 --- a/helix-core/src/main/java/org/apache/helix/util/RebalanceUtil.java +++ b/helix-core/src/main/java/org/apache/helix/util/RebalanceUtil.java @@ -37,7 +37,7 @@ public static Map buildInternalIdealState(IdealState state) { Map partitionIndex = new HashMap(); Map reversePartitionIndex = new HashMap(); boolean indexInPartitionName = true; - for (String partitionId : state.getPartitionSet()) { + for (String partitionId : 
state.getPartitionStringSet()) { int lastPos = partitionId.lastIndexOf("_"); if (lastPos < 0) { indexInPartitionName = false; @@ -58,7 +58,7 @@ public static Map buildInternalIdealState(IdealState state) { if (indexInPartitionName == false) { List partitions = new ArrayList(); - partitions.addAll(state.getPartitionSet()); + partitions.addAll(state.getPartitionStringSet()); Collections.sort(partitions); for (int i = 0; i < partitions.size(); i++) { partitionIndex.put(partitions.get(i), i); @@ -69,7 +69,7 @@ public static Map buildInternalIdealState(IdealState state) { Map> nodeMasterAssignmentMap = new TreeMap>(); Map>> combinedNodeSlaveAssignmentMap = new TreeMap>>(); - for (String partition : state.getPartitionSet()) { + for (String partition : state.getPartitionStringSet()) { List instances = state.getRecord().getListField(partition); String master = instances.get(0); if (!nodeMasterAssignmentMap.containsKey(master)) { @@ -104,7 +104,7 @@ public static String[] parseStates(String clusterName, StateModelDefinition stat // StateModelDefinition def = new StateModelDefinition(stateModDef); - List statePriorityList = stateModDef.getStatesPriorityList(); + List statePriorityList = stateModDef.getStatesPriorityStringList(); for (String state : statePriorityList) { String count = stateModDef.getNumInstancesPerState(state); diff --git a/helix-core/src/main/java/org/apache/helix/util/StatusUpdateUtil.java b/helix-core/src/main/java/org/apache/helix/util/StatusUpdateUtil.java index 02c39d110a..7fab9d549a 100644 --- a/helix-core/src/main/java/org/apache/helix/util/StatusUpdateUtil.java +++ b/helix-core/src/main/java/org/apache/helix/util/StatusUpdateUtil.java @@ -254,7 +254,7 @@ public ZNRecord createEmptyStatusUpdateRecord(String id) { */ ZNRecord createMessageLogRecord(Message message) { ZNRecord result = new ZNRecord(getStatusUpdateRecordName(message)); - String mapFieldKey = "MESSAGE " + message.getMsgId(); + String mapFieldKey = "MESSAGE " + message.getMsgIdString(); 
result.setMapField(mapFieldKey, new TreeMap()); // Store all the simple fields of the message in the new ZNRecord's map @@ -290,7 +290,7 @@ public ZNRecord createMessageStatusUpdateRecord(Message message, Level level, Cl contentMap.put("Message state", message.getMsgState().toString()); contentMap.put("AdditionalInfo", additionalInfo); contentMap.put("Class", classInfo.toString()); - contentMap.put("MSG_ID", message.getMsgId()); + contentMap.put("MSG_ID", message.getMsgIdString()); DateFormat formatter = new SimpleDateFormat("yyyyMMdd-HHmmss.SSSSSS"); String time = formatter.format(new Date()); @@ -304,8 +304,8 @@ public ZNRecord createMessageStatusUpdateRecord(Message message, Level level, Cl String getRecordIdForMessage(Message message) { if (message.getMsgType().equals(MessageType.STATE_TRANSITION)) { - return message.getPartitionName() + " Trans:" + message.getFromState().charAt(0) + "->" - + message.getToState().charAt(0) + " " + UUID.randomUUID().toString(); + return message.getPartitionName() + " Trans:" + message.getFromStateString().charAt(0) + "->" + + message.getToStateString().charAt(0) + " " + UUID.randomUUID().toString(); } else { return message.getMsgType() + " " + UUID.randomUUID().toString(); } @@ -375,16 +375,16 @@ void publishStatusUpdateRecord(ZNRecord record, Message message, Level level, String instanceName = message.getTgtName(); String statusUpdateSubPath = getStatusUpdateSubPath(message); String statusUpdateKey = getStatusUpdateKey(message); - String sessionId = message.getExecutionSessionId(); + String sessionId = message.getExecutionSessionIdString(); if (sessionId == null) { - sessionId = message.getTgtSessionId(); + sessionId = message.getTgtSessionIdString(); } if (sessionId == null) { sessionId = "*"; } Builder keyBuilder = accessor.keyBuilder(); - if (!_recordedMessages.containsKey(message.getMsgId())) { + if (!_recordedMessages.containsKey(message.getMsgIdString())) { // TODO instanceName of a controller might be any string if 
(instanceName.equalsIgnoreCase("Controller")) { accessor.updateProperty( @@ -408,7 +408,7 @@ void publishStatusUpdateRecord(ZNRecord record, Message message, Level level, accessor.updateProperty(propertyKey, new StatusUpdate(statusUpdateRecord)); } - _recordedMessages.put(message.getMsgId(), message.getMsgId()); + _recordedMessages.put(message.getMsgIdString(), message.getMsgIdString()); } if (instanceName.equalsIgnoreCase("Controller")) { @@ -438,7 +438,7 @@ private String getStatusUpdateKey(Message message) { if (message.getMsgType().equalsIgnoreCase(MessageType.STATE_TRANSITION.toString())) { return message.getPartitionName(); } - return message.getMsgId(); + return message.getMsgIdString(); } /** @@ -454,9 +454,9 @@ String getStatusUpdateSubPath(Message message) { String getStatusUpdateRecordName(Message message) { if (message.getMsgType().equalsIgnoreCase(MessageType.STATE_TRANSITION.toString())) { - return message.getTgtSessionId() + "__" + message.getResourceName(); + return message.getTgtSessionIdString() + "__" + message.getResourceName(); } - return message.getMsgId(); + return message.getMsgIdString(); } /** @@ -472,9 +472,9 @@ void publishErrorRecord(ZNRecord record, Message message, HelixDataAccessor acce String instanceName = message.getTgtName(); String statusUpdateSubPath = getStatusUpdateSubPath(message); String statusUpdateKey = getStatusUpdateKey(message); - String sessionId = message.getExecutionSessionId(); + String sessionId = message.getExecutionSessionIdString(); if (sessionId == null) { - sessionId = message.getTgtSessionId(); + sessionId = message.getTgtSessionIdString(); } if (sessionId == null) { sessionId = "*"; diff --git a/helix-core/src/test/java/org/apache/helix/ZkUnitTestBase.java b/helix-core/src/test/java/org/apache/helix/ZkUnitTestBase.java index abf75becdd..58dd070d5b 100644 --- a/helix-core/src/test/java/org/apache/helix/ZkUnitTestBase.java +++ b/helix-core/src/test/java/org/apache/helix/ZkUnitTestBase.java @@ -43,9 +43,9 @@ 
import org.apache.helix.model.InstanceConfig; import org.apache.helix.model.LiveInstance; import org.apache.helix.model.Message; -import org.apache.helix.model.StateModelDefinition; import org.apache.helix.model.Message.Attributes; import org.apache.helix.model.Message.MessageType; +import org.apache.helix.model.StateModelDefinition; import org.apache.helix.tools.StateModelConfigGenerator; import org.apache.helix.util.HelixUtil; import org.apache.log4j.Logger; @@ -177,7 +177,7 @@ public void verifyReplication(ZkClient zkClient, String clusterName, String reso Builder keyBuilder = accessor.keyBuilder(); IdealState idealState = accessor.getProperty(keyBuilder.idealStates(resource)); - for (String partitionName : idealState.getPartitionSet()) { + for (String partitionName : idealState.getPartitionStringSet()) { if (idealState.getRebalanceMode() == RebalanceMode.SEMI_AUTO) { AssertJUnit.assertEquals(repl, idealState.getPreferenceList(partitionName).size()); } else if (idealState.getRebalanceMode() == RebalanceMode.CUSTOMIZED) { diff --git a/helix-core/src/test/java/org/apache/helix/controller/stages/TestMsgSelectionStage.java b/helix-core/src/test/java/org/apache/helix/controller/stages/TestMsgSelectionStage.java index 820abbebd6..b8166f6751 100644 --- a/helix-core/src/test/java/org/apache/helix/controller/stages/TestMsgSelectionStage.java +++ b/helix-core/src/test/java/org/apache/helix/controller/stages/TestMsgSelectionStage.java @@ -67,7 +67,7 @@ public void testMasterXfer() { messages, stateConstraints, stateTransitionPriorities, "OFFLINE"); Assert.assertEquals(selectedMsg.size(), 1); - Assert.assertEquals(selectedMsg.get(0).getMsgId(), "msgId_1"); + Assert.assertEquals(selectedMsg.get(0).getMsgIdString(), "msgId_1"); System.out.println("END testMasterXfer at " + new Date(System.currentTimeMillis())); } diff --git a/helix-core/src/test/java/org/apache/helix/controller/stages/TestRebalancePipeline.java 
b/helix-core/src/test/java/org/apache/helix/controller/stages/TestRebalancePipeline.java index fccd0c796a..4c5a0d74a7 100644 --- a/helix-core/src/test/java/org/apache/helix/controller/stages/TestRebalancePipeline.java +++ b/helix-core/src/test/java/org/apache/helix/controller/stages/TestRebalancePipeline.java @@ -111,8 +111,8 @@ public void testDuplicateMsg() { msgSelOutput.getMessages(resourceName, new Partition(resourceName + "_0")); Assert.assertEquals(messages.size(), 1, "Should output 1 message: OFFLINE-SLAVE for node0"); Message message = messages.get(0); - Assert.assertEquals(message.getFromState(), "OFFLINE"); - Assert.assertEquals(message.getToState(), "SLAVE"); + Assert.assertEquals(message.getFromStateString(), "OFFLINE"); + Assert.assertEquals(message.getToStateString(), "SLAVE"); Assert.assertEquals(message.getTgtName(), "localhost_0"); // round2: updates node0 currentState to SLAVE but keep the @@ -258,8 +258,8 @@ public void testChangeIdealStateWithPendingMsg() { msgSelOutput.getMessages(resourceName, new Partition(resourceName + "_0")); Assert.assertEquals(messages.size(), 1, "Should output 1 message: OFFLINE-SLAVE for node0"); Message message = messages.get(0); - Assert.assertEquals(message.getFromState(), "OFFLINE"); - Assert.assertEquals(message.getToState(), "SLAVE"); + Assert.assertEquals(message.getFromStateString(), "OFFLINE"); + Assert.assertEquals(message.getToStateString(), "SLAVE"); Assert.assertEquals(message.getTgtName(), "localhost_0"); // round2: drop resource, but keep the @@ -275,8 +275,8 @@ public void testChangeIdealStateWithPendingMsg() { "Should output only 1 message: OFFLINE->DROPPED for localhost_1"); message = messages.get(0); - Assert.assertEquals(message.getFromState(), "SLAVE"); - Assert.assertEquals(message.getToState(), "OFFLINE"); + Assert.assertEquals(message.getFromStateString(), "SLAVE"); + Assert.assertEquals(message.getToStateString(), "OFFLINE"); Assert.assertEquals(message.getTgtName(), "localhost_1"); // round3: 
remove O->S for localhost_0, controller should now send O->DROPPED to @@ -291,8 +291,8 @@ public void testChangeIdealStateWithPendingMsg() { Assert.assertEquals(messages.size(), 1, "Should output 1 message: OFFLINE->DROPPED for localhost_0"); message = messages.get(0); - Assert.assertEquals(message.getFromState(), "OFFLINE"); - Assert.assertEquals(message.getToState(), "DROPPED"); + Assert.assertEquals(message.getFromStateString(), "OFFLINE"); + Assert.assertEquals(message.getToStateString(), "DROPPED"); Assert.assertEquals(message.getTgtName(), "localhost_0"); System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis())); @@ -351,8 +351,8 @@ public void testMasterXfer() { msgSelOutput.getMessages(resourceName, new Partition(resourceName + "_0")); Assert.assertEquals(messages.size(), 1, "Should output 1 message: SLAVE-MASTER for node1"); Message message = messages.get(0); - Assert.assertEquals(message.getFromState(), "SLAVE"); - Assert.assertEquals(message.getToState(), "MASTER"); + Assert.assertEquals(message.getFromStateString(), "SLAVE"); + Assert.assertEquals(message.getToStateString(), "MASTER"); Assert.assertEquals(message.getTgtName(), "localhost_1"); // round2: updates node0 currentState to SLAVE but keep the diff --git a/helix-core/src/test/java/org/apache/helix/controller/stages/TestResourceComputationStage.java b/helix-core/src/test/java/org/apache/helix/controller/stages/TestResourceComputationStage.java index 6febe934d4..3f6fa757b3 100644 --- a/helix-core/src/test/java/org/apache/helix/controller/stages/TestResourceComputationStage.java +++ b/helix-core/src/test/java/org/apache/helix/controller/stages/TestResourceComputationStage.java @@ -173,7 +173,7 @@ public void testMultipleResourcesWithSomeDropped() throws Exception { AssertJUnit.assertEquals(resourceMap.get(oldResource).getStateModelDefRef(), currentState.getStateModelDefRef()); AssertJUnit.assertEquals(resourceMap.get(oldResource).getPartitions().size(), currentState - 
.getPartitionStateMap().size()); + .getPartitionStateStringMap().size()); AssertJUnit.assertNotNull(resourceMap.get(oldResource).getPartition("testResourceOld_0")); AssertJUnit.assertNotNull(resourceMap.get(oldResource).getPartition("testResourceOld_1")); AssertJUnit.assertNotNull(resourceMap.get(oldResource).getPartition("testResourceOld_2")); diff --git a/helix-core/src/test/java/org/apache/helix/healthcheck/TestAddDropAlert.java b/helix-core/src/test/java/org/apache/helix/healthcheck/TestAddDropAlert.java index d5a1b08120..3de9d4db5b 100644 --- a/helix-core/src/test/java/org/apache/helix/healthcheck/TestAddDropAlert.java +++ b/helix-core/src/test/java/org/apache/helix/healthcheck/TestAddDropAlert.java @@ -75,8 +75,8 @@ public class AddDropAlertTransition extends MockTransition { public void doTransition(Message message, NotificationContext context) { HelixManager manager = context.getManager(); HelixDataAccessor accessor = manager.getHelixDataAccessor(); - String fromState = message.getFromState(); - String toState = message.getToState(); + String fromState = message.getFromStateString(); + String toState = message.getToStateString(); String instance = message.getTgtName(); String partition = message.getPartitionName(); diff --git a/helix-core/src/test/java/org/apache/helix/healthcheck/TestExpandAlert.java b/helix-core/src/test/java/org/apache/helix/healthcheck/TestExpandAlert.java index 69d1062de1..211f2308ab 100644 --- a/helix-core/src/test/java/org/apache/helix/healthcheck/TestExpandAlert.java +++ b/helix-core/src/test/java/org/apache/helix/healthcheck/TestExpandAlert.java @@ -77,8 +77,8 @@ public class ExpandAlertTransition extends MockTransition { public void doTransition(Message message, NotificationContext context) { HelixManager manager = context.getManager(); HelixDataAccessor accessor = manager.getHelixDataAccessor(); - String fromState = message.getFromState(); - String toState = message.getToState(); + String fromState = message.getFromStateString(); 
+ String toState = message.getToStateString(); String instance = message.getTgtName(); String partition = message.getPartitionName(); diff --git a/helix-core/src/test/java/org/apache/helix/healthcheck/TestSimpleAlert.java b/helix-core/src/test/java/org/apache/helix/healthcheck/TestSimpleAlert.java index 1db5ddd41d..f5eb13c600 100644 --- a/helix-core/src/test/java/org/apache/helix/healthcheck/TestSimpleAlert.java +++ b/helix-core/src/test/java/org/apache/helix/healthcheck/TestSimpleAlert.java @@ -82,8 +82,8 @@ public SimpleAlertTransition(int value) { public void doTransition(Message message, NotificationContext context) { HelixManager manager = context.getManager(); HelixDataAccessor accessor = manager.getHelixDataAccessor(); - String fromState = message.getFromState(); - String toState = message.getToState(); + String fromState = message.getFromStateString(); + String toState = message.getToStateString(); String instance = message.getTgtName(); String partition = message.getPartitionName(); diff --git a/helix-core/src/test/java/org/apache/helix/healthcheck/TestSimpleWildcardAlert.java b/helix-core/src/test/java/org/apache/helix/healthcheck/TestSimpleWildcardAlert.java index c5b55dabea..9e7077eebc 100644 --- a/helix-core/src/test/java/org/apache/helix/healthcheck/TestSimpleWildcardAlert.java +++ b/helix-core/src/test/java/org/apache/helix/healthcheck/TestSimpleWildcardAlert.java @@ -81,8 +81,8 @@ public SimpleAlertTransition(int value) { public void doTransition(Message message, NotificationContext context) { HelixManager manager = context.getManager(); HelixDataAccessor accessor = manager.getHelixDataAccessor(); - String fromState = message.getFromState(); - String toState = message.getToState(); + String fromState = message.getFromStateString(); + String toState = message.getToStateString(); String instance = message.getTgtName(); String partition = message.getPartitionName(); diff --git 
a/helix-core/src/test/java/org/apache/helix/healthcheck/TestStalenessAlert.java b/helix-core/src/test/java/org/apache/helix/healthcheck/TestStalenessAlert.java index 2304b417de..36cd9f58a4 100644 --- a/helix-core/src/test/java/org/apache/helix/healthcheck/TestStalenessAlert.java +++ b/helix-core/src/test/java/org/apache/helix/healthcheck/TestStalenessAlert.java @@ -75,8 +75,8 @@ public class StalenessAlertTransition extends MockTransition { public void doTransition(Message message, NotificationContext context) { HelixManager manager = context.getManager(); HelixDataAccessor accessor = manager.getHelixDataAccessor(); - String fromState = message.getFromState(); - String toState = message.getToState(); + String fromState = message.getFromStateString(); + String toState = message.getToStateString(); String instance = message.getTgtName(); String partition = message.getPartitionName(); diff --git a/helix-core/src/test/java/org/apache/helix/healthcheck/TestWildcardAlert.java b/helix-core/src/test/java/org/apache/helix/healthcheck/TestWildcardAlert.java index a0456a7e7f..a33e75c68a 100644 --- a/helix-core/src/test/java/org/apache/helix/healthcheck/TestWildcardAlert.java +++ b/helix-core/src/test/java/org/apache/helix/healthcheck/TestWildcardAlert.java @@ -151,8 +151,8 @@ public class WildcardAlertTransition extends MockTransition { public void doTransition(Message message, NotificationContext context) { HelixManager manager = context.getManager(); HelixDataAccessor accessor = manager.getHelixDataAccessor(); - String fromState = message.getFromState(); - String toState = message.getToState(); + String fromState = message.getFromStateString(); + String toState = message.getToStateString(); String instance = message.getTgtName(); String partition = message.getPartitionName(); diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestAutoRebalance.java b/helix-core/src/test/java/org/apache/helix/integration/TestAutoRebalance.java index 1943364767..f873b0e9fd 
100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestAutoRebalance.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestAutoRebalance.java @@ -264,7 +264,7 @@ public boolean verify() { cache.refresh(accessor); String masterValue = cache.getStateModelDef(cache.getIdealState(_resourceName).getStateModelDefRef()) - .getStatesPriorityList().get(0); + .getStatesPriorityStringList().get(0); int replicas = Integer.parseInt(cache.getIdealState(_resourceName).getReplicas()); String instanceGroupTag = cache.getIdealState(_resourceName).getInstanceGroupTag(); int instances = 0; diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestAutoRebalancePartitionLimit.java b/helix-core/src/test/java/org/apache/helix/integration/TestAutoRebalancePartitionLimit.java index 32cafcfa72..2d16e61535 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestAutoRebalancePartitionLimit.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestAutoRebalancePartitionLimit.java @@ -98,11 +98,11 @@ public void beforeClass() throws Exception { Assert.assertTrue(result); ExternalView ev = manager.getHelixDataAccessor().getProperty(accessor.keyBuilder().externalView(TEST_DB)); - System.out.println(ev.getPartitionSet().size()); + System.out.println(ev.getPartitionStringSet().size()); if (i < 3) { - Assert.assertEquals(ev.getPartitionSet().size(), 25 * (i + 1)); + Assert.assertEquals(ev.getPartitionStringSet().size(), 25 * (i + 1)); } else { - Assert.assertEquals(ev.getPartitionSet().size(), 100); + Assert.assertEquals(ev.getPartitionStringSet().size(), 100); } } } @@ -132,7 +132,7 @@ public void testAutoRebalanceWithMaxPartitionPerNode() throws Exception { HelixDataAccessor accessor = manager.getHelixDataAccessor(); ExternalView ev = manager.getHelixDataAccessor().getProperty(accessor.keyBuilder().externalView(TEST_DB)); - Assert.assertEquals(ev.getPartitionSet().size(), 100); + 
Assert.assertEquals(ev.getPartitionStringSet().size(), 100); instanceName = PARTICIPANT_PREFIX + "_" + (START_PORT + 1); _startCMResultMap.get(instanceName)._manager.disconnect(); @@ -145,7 +145,7 @@ public void testAutoRebalanceWithMaxPartitionPerNode() throws Exception { CLUSTER_NAME, TEST_DB)); Assert.assertTrue(result); ev = manager.getHelixDataAccessor().getProperty(accessor.keyBuilder().externalView(TEST_DB)); - Assert.assertEquals(ev.getPartitionSet().size(), 75); + Assert.assertEquals(ev.getPartitionStringSet().size(), 75); // add 2 nodes for (int i = 0; i < 2; i++) { @@ -231,7 +231,7 @@ public boolean verify() { cache.refresh(accessor); String masterValue = cache.getStateModelDef(cache.getIdealState(_resourceName).getStateModelDefRef()) - .getStatesPriorityList().get(0); + .getStatesPriorityStringList().get(0); int replicas = Integer.parseInt(cache.getIdealState(_resourceName).getReplicas()); return verifyBalanceExternalView(accessor.getProperty(keyBuilder.externalView(_resourceName)) .getRecord(), numberOfPartitions, masterValue, replicas, cache.getLiveInstances().size(), diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestCleanupExternalView.java b/helix-core/src/test/java/org/apache/helix/integration/TestCleanupExternalView.java index 781aa89a5e..d3ac8ac2de 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestCleanupExternalView.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestCleanupExternalView.java @@ -94,10 +94,10 @@ public void test() throws Exception { // System.out.println("remove current-state"); LiveInstance liveInstance = accessor.getProperty(keyBuilder.liveInstance("localhost_12918")); - accessor.removeProperty(keyBuilder.currentState("localhost_12918", liveInstance.getSessionId(), + accessor.removeProperty(keyBuilder.currentState("localhost_12918", liveInstance.getSessionIdString(), "TestDB0")); liveInstance = accessor.getProperty(keyBuilder.liveInstance("localhost_12919")); - 
accessor.removeProperty(keyBuilder.currentState("localhost_12919", liveInstance.getSessionId(), + accessor.removeProperty(keyBuilder.currentState("localhost_12919", liveInstance.getSessionIdString(), "TestDB0")); // re-enable controller shall remove orphan external-view diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestCustomizedIdealStateRebalancer.java b/helix-core/src/test/java/org/apache/helix/integration/TestCustomizedIdealStateRebalancer.java index 55fc876f85..aba14a7db6 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestCustomizedIdealStateRebalancer.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestCustomizedIdealStateRebalancer.java @@ -78,7 +78,7 @@ public ResourceAssignment computeResourceMapping(Resource resource, int nodeIndex = i % liveInstances.size(); currentIdealState.getInstanceStateMap(partitionName).clear(); currentIdealState.getInstanceStateMap(partitionName).put(liveInstances.get(nodeIndex), - stateModelDef.getStatesPriorityList().get(0)); + stateModelDef.getStatesPriorityStringList().get(0)); resourceMapping.addReplicaMap(partition, currentIdealState.getInstanceStateMap(partitionName)); i++; @@ -108,12 +108,12 @@ public void testCustomizedIdealStateRebalancer() throws InterruptedException { new ZKHelixDataAccessor(CLUSTER_NAME, new ZkBaseDataAccessor(_zkClient)); Builder keyBuilder = accessor.keyBuilder(); ExternalView ev = accessor.getProperty(keyBuilder.externalView(db2)); - Assert.assertEquals(ev.getPartitionSet().size(), 60); - for (String partition : ev.getPartitionSet()) { + Assert.assertEquals(ev.getPartitionStringSet().size(), 60); + for (String partition : ev.getPartitionStringSet()) { Assert.assertEquals(ev.getStateMap(partition).size(), 1); } IdealState is = accessor.getProperty(keyBuilder.idealStates(db2)); - for (String partition : is.getPartitionSet()) { + for (String partition : is.getPartitionStringSet()) { Assert.assertEquals(is.getPreferenceList(partition).size(), 
0); Assert.assertEquals(is.getInstanceStateMap(partition).size(), 0); } @@ -145,7 +145,7 @@ public boolean verify() { cache.refresh(accessor); String masterValue = cache.getStateModelDef(cache.getIdealState(_resourceName).getStateModelDefRef()) - .getStatesPriorityList().get(0); + .getStatesPriorityStringList().get(0); int replicas = Integer.parseInt(cache.getIdealState(_resourceName).getReplicas()); String instanceGroupTag = cache.getIdealState(_resourceName).getInstanceGroupTag(); int instances = 0; diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestDrop.java b/helix-core/src/test/java/org/apache/helix/integration/TestDrop.java index b1fcc6077f..84e70efdc2 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestDrop.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestDrop.java @@ -353,7 +353,7 @@ public void testDropSchemataResource() throws Exception { ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, baseAccessor); Builder keyBuilder = accessor.keyBuilder(); ExternalView extView = accessor.getProperty(keyBuilder.externalView("schemata")); - Assert.assertEquals(extView.getPartitionSet().size(), 0, + Assert.assertEquals(extView.getPartitionStringSet().size(), 0, "schemata externalView should be empty but was \"" + extView + "\""); // clean up diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestEnablePartitionDuringDisable.java b/helix-core/src/test/java/org/apache/helix/integration/TestEnablePartitionDuringDisable.java index 48cabbd13f..507a355c2a 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestEnablePartitionDuringDisable.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestEnablePartitionDuringDisable.java @@ -50,8 +50,8 @@ public void doTransition(Message message, NotificationContext context) { String instance = message.getTgtName(); String partitionName = message.getPartitionName(); - String fromState = message.getFromState(); - 
String toState = message.getToState(); + String fromState = message.getFromStateString(); + String toState = message.getToStateString(); if (instance.equals("localhost_12919") && partitionName.equals("TestDB0_0")) { if (fromState.equals("SLAVE") && toState.equals("OFFLINE")) { slaveToOfflineCnt++; diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestHelixInstanceTag.java b/helix-core/src/test/java/org/apache/helix/integration/TestHelixInstanceTag.java index 448438635c..69b45daf2b 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestHelixInstanceTag.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestHelixInstanceTag.java @@ -56,7 +56,7 @@ public void testInstanceTag() throws Exception { ExternalView ev = accessor.getProperty(accessor.keyBuilder().externalView(DB2)); Set hosts = new HashSet(); - for (String p : ev.getPartitionSet()) { + for (String p : ev.getPartitionStringSet()) { for (String hostName : ev.getStateMap(p).keySet()) { InstanceConfig config = accessor.getProperty(accessor.keyBuilder().instanceConfig(hostName)); @@ -85,7 +85,7 @@ public void testInstanceTag() throws Exception { ev = accessor.getProperty(accessor.keyBuilder().externalView(DB3)); hosts = new HashSet(); - for (String p : ev.getPartitionSet()) { + for (String p : ev.getPartitionStringSet()) { for (String hostName : ev.getStateMap(p).keySet()) { InstanceConfig config = accessor.getProperty(accessor.keyBuilder().instanceConfig(hostName)); diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestMessagePartitionStateMismatch.java b/helix-core/src/test/java/org/apache/helix/integration/TestMessagePartitionStateMismatch.java index 487e68947e..0d47befc12 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestMessagePartitionStateMismatch.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestMessagePartitionStateMismatch.java @@ -47,8 +47,8 @@ public void testStateMismatch() throws 
InterruptedException { accessor.getChildValuesMap(accessor.keyBuilder().liveInstances()); for (String instanceName : liveinstanceMap.keySet()) { - String sessionid = liveinstanceMap.get(instanceName).getSessionId(); - for (String partition : ev.getPartitionSet()) { + String sessionid = liveinstanceMap.get(instanceName).getSessionIdString(); + for (String partition : ev.getPartitionStringSet()) { if (ev.getStateMap(partition).containsKey(instanceName)) { String uuid = UUID.randomUUID().toString(); Message message = new Message(MessageType.STATE_TRANSITION, uuid); @@ -78,7 +78,7 @@ public void testStateMismatch() throws InterruptedException { message.setStateModelDef("MasterSlave"); message.setStateModelFactoryName("DEFAULT"); } - accessor.setProperty(accessor.keyBuilder().message(instanceName, message.getMsgId()), + accessor.setProperty(accessor.keyBuilder().message(instanceName, message.getMsgIdString()), message); } } diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestMessagingService.java b/helix-core/src/test/java/org/apache/helix/integration/TestMessagingService.java index 2354ebd2bf..b711e9e991 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestMessagingService.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestMessagingService.java @@ -67,7 +67,7 @@ public HelixTaskResult handleMessage() throws InterruptedException { HelixTaskResult result = new HelixTaskResult(); result.setSuccess(true); Thread.sleep(1000); - System.out.println("TestMessagingHandler " + _message.getMsgId()); + System.out.println("TestMessagingHandler " + _message.getMsgIdString()); _processedMsgIds.add(_message.getRecord().getSimpleField("TestMessagingPara")); result.getTaskResultMap().put("ReplyMessage", "TestReplyMessage"); return result; diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestResetPartitionState.java b/helix-core/src/test/java/org/apache/helix/integration/TestResetPartitionState.java index 
09e57c601c..c34281acc0 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestResetPartitionState.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestResetPartitionState.java @@ -52,8 +52,8 @@ public ErrTransitionWithResetCnt(Map> errPartitions) { public void doTransition(Message message, NotificationContext context) { // System.err.println("doReset() invoked"); super.doTransition(message, context); - String fromState = message.getFromState(); - String toState = message.getToState(); + String fromState = message.getFromStateString(); + String toState = message.getToStateString(); if (fromState.equals("ERROR") && toState.equals("OFFLINE")) { _errToOfflineInvoked++; } @@ -189,7 +189,7 @@ private void clearStatusUpdate(String clusterName, String instance, String resou Builder keyBuilder = accessor.keyBuilder(); LiveInstance liveInstance = accessor.getProperty(keyBuilder.liveInstance(instance)); - accessor.removeProperty(keyBuilder.stateTransitionStatus(instance, liveInstance.getSessionId(), + accessor.removeProperty(keyBuilder.stateTransitionStatus(instance, liveInstance.getSessionIdString(), resource, partition)); } diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestSchedulerMessage.java b/helix-core/src/test/java/org/apache/helix/integration/TestSchedulerMessage.java index 2c174c4f77..6c3716a7f8 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestSchedulerMessage.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestSchedulerMessage.java @@ -114,13 +114,13 @@ public HelixTaskResult handleMessage() throws InterruptedException { HelixTaskResult result = new HelixTaskResult(); result.setSuccess(true); String destName = _message.getTgtName(); - result.getTaskResultMap().put("Message", _message.getMsgId()); + result.getTaskResultMap().put("Message", _message.getMsgIdString()); synchronized (_results) { if (!_results.containsKey(_message.getPartitionName())) { 
_results.put(_message.getPartitionName(), new ConcurrentSkipListSet()); } } - _results.get(_message.getPartitionName()).add(_message.getMsgId()); + _results.get(_message.getPartitionName()).add(_message.getMsgIdString()); // System.err.println("Message " + _message.getMsgId() + " executed"); return result; } @@ -169,7 +169,7 @@ public HelixTaskResult handleMessage() throws InterruptedException { _latch.await(); HelixTaskResult result = new HelixTaskResult(); result.setSuccess(true); - result.getTaskResultMap().put("Message", _message.getMsgId()); + result.getTaskResultMap().put("Message", _message.getMsgIdString()); String destName = _message.getTgtName(); synchronized (_results) { if (!_results.containsKey(_message.getPartitionName())) { @@ -236,7 +236,7 @@ public void TestSchedulerMsgUsingQueue() throws Exception { HelixDataAccessor helixDataAccessor = manager.getHelixDataAccessor(); Builder keyBuilder = helixDataAccessor.keyBuilder(); - helixDataAccessor.createProperty(keyBuilder.controllerMessage(schedulerMessage.getMsgId()), + helixDataAccessor.createProperty(keyBuilder.controllerMessage(schedulerMessage.getMsgIdString()), schedulerMessage); for (int i = 0; i < 30; i++) { @@ -249,7 +249,7 @@ public void TestSchedulerMsgUsingQueue() throws Exception { Assert.assertEquals(_PARTITIONS, _factory._results.size()); PropertyKey controllerTaskStatus = keyBuilder.controllerTaskStatus(MessageType.SCHEDULER_MSG.toString(), - schedulerMessage.getMsgId()); + schedulerMessage.getMsgIdString()); int messageResultCount = 0; for (int i = 0; i < 10; i++) { @@ -324,7 +324,7 @@ public void TestSchedulerMsg() throws Exception { HelixDataAccessor helixDataAccessor = manager.getHelixDataAccessor(); Builder keyBuilder = helixDataAccessor.keyBuilder(); - helixDataAccessor.createProperty(keyBuilder.controllerMessage(schedulerMessage.getMsgId()), + helixDataAccessor.createProperty(keyBuilder.controllerMessage(schedulerMessage.getMsgIdString()), schedulerMessage); for (int i = 0; i < 30; 
i++) { @@ -337,7 +337,7 @@ public void TestSchedulerMsg() throws Exception { Assert.assertEquals(_PARTITIONS, _factory._results.size()); PropertyKey controllerTaskStatus = keyBuilder.controllerTaskStatus(MessageType.SCHEDULER_MSG.toString(), - schedulerMessage.getMsgId()); + schedulerMessage.getMsgIdString()); int messageResultCount = 0; for (int i = 0; i < 10; i++) { @@ -552,7 +552,7 @@ public void TestSchedulerZeroMsg() throws Exception { HelixDataAccessor helixDataAccessor = manager.getHelixDataAccessor(); Builder keyBuilder = helixDataAccessor.keyBuilder(); - PropertyKey controllerMessageKey = keyBuilder.controllerMessage(schedulerMessage.getMsgId()); + PropertyKey controllerMessageKey = keyBuilder.controllerMessage(schedulerMessage.getMsgIdString()); helixDataAccessor.setProperty(controllerMessageKey, schedulerMessage); Thread.sleep(3000); @@ -560,7 +560,7 @@ public void TestSchedulerZeroMsg() throws Exception { Assert.assertEquals(0, factory._results.size()); PropertyKey controllerTaskStatus = keyBuilder.controllerTaskStatus(MessageType.SCHEDULER_MSG.toString(), - schedulerMessage.getMsgId()); + schedulerMessage.getMsgIdString()); for (int i = 0; i < 10; i++) { StatusUpdate update = helixDataAccessor.getProperty(controllerTaskStatus); if (update == null || update.getRecord().getMapField("SentMessageCount") == null) { diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestStateTransitionTimeout.java b/helix-core/src/test/java/org/apache/helix/integration/TestStateTransitionTimeout.java index edc10c6dfd..7500003e9e 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestStateTransitionTimeout.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestStateTransitionTimeout.java @@ -28,14 +28,13 @@ import org.apache.helix.HelixDataAccessor; import org.apache.helix.NotificationContext; -import org.apache.helix.TestHelper; import org.apache.helix.PropertyKey.Builder; +import org.apache.helix.TestHelper; import 
org.apache.helix.TestHelper.StartCMResult; import org.apache.helix.controller.HelixControllerMain; import org.apache.helix.manager.zk.ZNRecordSerializer; import org.apache.helix.manager.zk.ZkClient; import org.apache.helix.messaging.handling.MessageHandler.ErrorCode; -import org.apache.helix.mock.participant.MockJobIntf; import org.apache.helix.mock.participant.MockMSStateModel; import org.apache.helix.mock.participant.MockParticipant; import org.apache.helix.mock.participant.MockTransition; @@ -43,7 +42,6 @@ import org.apache.helix.model.ExternalView; import org.apache.helix.model.IdealState; import org.apache.helix.model.Message; -import org.apache.helix.participant.statemachine.StateModel; import org.apache.helix.participant.statemachine.StateModelFactory; import org.apache.helix.participant.statemachine.StateModelInfo; import org.apache.helix.participant.statemachine.StateTransitionError; @@ -178,7 +176,7 @@ public void testStateTransitionTimeOut() throws Exception { String instanceName = PARTICIPANT_PREFIX + "_" + (START_PORT + i); SleepStateModelFactory factory = new SleepStateModelFactory(1000); factories.put(instanceName, factory); - for (String p : idealState.getPartitionSet()) { + for (String p : idealState.getPartitionStringSet()) { if (idealState.getPreferenceList(p).get(0).equals(instanceName)) { factory.addPartition(p); } @@ -200,7 +198,7 @@ public void testStateTransitionTimeOut() throws Exception { Builder kb = accessor.keyBuilder(); ExternalView ev = accessor.getProperty(kb.externalView(TEST_DB)); - for (String p : idealState.getPartitionSet()) { + for (String p : idealState.getPartitionStringSet()) { String idealMaster = idealState.getPreferenceList(p).get(0); Assert.assertTrue(ev.getStateMap(p).get(idealMaster).equals("ERROR")); diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestStatusUpdate.java b/helix-core/src/test/java/org/apache/helix/integration/TestStatusUpdate.java index 4b92670d8a..f59ca1dd4c 100644 --- 
a/helix-core/src/test/java/org/apache/helix/integration/TestStatusUpdate.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestStatusUpdate.java @@ -49,7 +49,7 @@ public void testParticipantStatusUpdates() throws Exception { for (ExternalView extView : extViews) { String resourceName = extView.getResourceName(); - Set partitionSet = extView.getPartitionSet(); + Set partitionSet = extView.getPartitionStringSet(); for (String partition : partitionSet) { Map stateMap = extView.getStateMap(partition); for (String instance : stateMap.keySet()) { diff --git a/helix-core/src/test/java/org/apache/helix/manager/zk/TestZkClusterManager.java b/helix-core/src/test/java/org/apache/helix/manager/zk/TestZkClusterManager.java index 78097119a8..3dc0564665 100644 --- a/helix-core/src/test/java/org/apache/helix/manager/zk/TestZkClusterManager.java +++ b/helix-core/src/test/java/org/apache/helix/manager/zk/TestZkClusterManager.java @@ -191,7 +191,7 @@ public ZNRecord getAdditionalLiveInstanceInfo() { Assert.assertTrue(liveInstance.getRecord().getListFields().size() == 1); Assert.assertTrue(liveInstance.getRecord().getMapFields().size() == 1); Assert.assertTrue(liveInstance.getRecord().getSimpleFields().size() == 5); - Assert.assertFalse(liveInstance.getSessionId().equals("value")); + Assert.assertFalse(liveInstance.getSessionIdString().equals("value")); Assert.assertFalse(liveInstance.getLiveInstance().equals("value")); // ////////////////////////////////// @@ -207,9 +207,9 @@ public ZNRecord getAdditionalLiveInstanceInfo() { Assert.assertTrue(liveInstance.getRecord().getListFields().size() == 1); Assert.assertTrue(liveInstance.getRecord().getMapFields().size() == 1); Assert.assertTrue(liveInstance.getRecord().getSimpleFields().size() == 5); - Assert.assertFalse(liveInstance.getSessionId().equals("value")); + Assert.assertFalse(liveInstance.getSessionIdString().equals("value")); Assert.assertFalse(liveInstance.getLiveInstance().equals("value")); - String sessionId = 
liveInstance.getSessionId(); + String sessionId = liveInstance.getSessionIdString(); ZkTestHelper.expireSession(manager2.getZkClient()); Thread.sleep(1000); @@ -218,9 +218,9 @@ public ZNRecord getAdditionalLiveInstanceInfo() { Assert.assertTrue(liveInstance.getRecord().getListFields().size() == 1); Assert.assertTrue(liveInstance.getRecord().getMapFields().size() == 1); Assert.assertTrue(liveInstance.getRecord().getSimpleFields().size() == 5); - Assert.assertFalse(liveInstance.getSessionId().equals("value")); + Assert.assertFalse(liveInstance.getSessionIdString().equals("value")); Assert.assertFalse(liveInstance.getLiveInstance().equals("value")); - Assert.assertFalse(sessionId.equals(liveInstance.getSessionId())); + Assert.assertFalse(sessionId.equals(liveInstance.getSessionIdString())); System.out.println("END " + className + ".testLiveInstanceInfoProvider() at " + new Date(System.currentTimeMillis())); diff --git a/helix-core/src/test/java/org/apache/helix/messaging/TestAsyncCallbackSvc.java b/helix-core/src/test/java/org/apache/helix/messaging/TestAsyncCallbackSvc.java index 2be955ffc7..9f039d4b02 100644 --- a/helix-core/src/test/java/org/apache/helix/messaging/TestAsyncCallbackSvc.java +++ b/helix-core/src/test/java/org/apache/helix/messaging/TestAsyncCallbackSvc.java @@ -59,7 +59,7 @@ public void onTimeOut() { @Override public void onReplyMessage(Message message) { // TODO Auto-generated method stub - _repliedMessageId.add(message.getMsgId()); + _repliedMessageId.add(message.getMsgIdString()); } } @@ -77,14 +77,14 @@ public void testAsyncCallbackSvc() throws Exception { try { MessageHandler aHandler = svc.createHandler(msg, changeContext); } catch (HelixException e) { - AssertJUnit.assertTrue(e.getMessage().indexOf(msg.getMsgId()) != -1); + AssertJUnit.assertTrue(e.getMessage().indexOf(msg.getMsgIdString()) != -1); } Message msg2 = new Message("RandomType", UUID.randomUUID().toString()); msg2.setTgtSessionId(manager.getSessionId()); try { MessageHandler 
aHandler = svc.createHandler(msg2, changeContext); } catch (HelixException e) { - AssertJUnit.assertTrue(e.getMessage().indexOf(msg2.getMsgId()) != -1); + AssertJUnit.assertTrue(e.getMessage().indexOf(msg2.getMsgIdString()) != -1); } Message msg3 = new Message(svc.getMessageType(), UUID.randomUUID().toString()); msg3.setTgtSessionId(manager.getSessionId()); @@ -92,7 +92,7 @@ public void testAsyncCallbackSvc() throws Exception { try { MessageHandler aHandler = svc.createHandler(msg3, changeContext); } catch (HelixException e) { - AssertJUnit.assertTrue(e.getMessage().indexOf(msg3.getMsgId()) != -1); + AssertJUnit.assertTrue(e.getMessage().indexOf(msg3.getMsgIdString()) != -1); } TestAsyncCallback callback = new TestAsyncCallback(); @@ -113,6 +113,6 @@ public void testAsyncCallbackSvc() throws Exception { aHandler.handleMessage(); AssertJUnit.assertTrue(callback.isDone()); - AssertJUnit.assertTrue(callback._repliedMessageId.contains(msg.getMsgId())); + AssertJUnit.assertTrue(callback._repliedMessageId.contains(msg.getMsgIdString())); } } diff --git a/helix-core/src/test/java/org/apache/helix/messaging/handling/TestHelixTaskExecutor.java b/helix-core/src/test/java/org/apache/helix/messaging/handling/TestHelixTaskExecutor.java index 1ff6595efe..6c1f9fc26c 100644 --- a/helix-core/src/test/java/org/apache/helix/messaging/handling/TestHelixTaskExecutor.java +++ b/helix-core/src/test/java/org/apache/helix/messaging/handling/TestHelixTaskExecutor.java @@ -60,7 +60,7 @@ public TestMessageHandler(Message message, NotificationContext context) { @Override public HelixTaskResult handleMessage() throws InterruptedException { HelixTaskResult result = new HelixTaskResult(); - _processedMsgIds.put(_message.getMsgId(), _message.getMsgId()); + _processedMsgIds.put(_message.getMsgIdString(), _message.getMsgIdString()); Thread.sleep(100); result.setSuccess(true); return result; @@ -127,14 +127,14 @@ public HelixTaskResult handleMessage() throws InterruptedException { if 
(_message.getRecord().getSimpleFields().containsKey("Cancelcount")) { sleepTimes = 10; } - _processingMsgIds.put(_message.getMsgId(), _message.getMsgId()); + _processingMsgIds.put(_message.getMsgIdString(), _message.getMsgIdString()); try { for (int i = 0; i < sleepTimes; i++) { Thread.sleep(100); } } catch (InterruptedException e) { _interrupted = true; - _timedOutMsgIds.put(_message.getMsgId(), ""); + _timedOutMsgIds.put(_message.getMsgIdString(), ""); result.setInterrupted(true); if (!_message.getRecord().getSimpleFields().containsKey("Cancelcount")) { _message.getRecord().setSimpleField("Cancelcount", "1"); @@ -144,7 +144,7 @@ public HelixTaskResult handleMessage() throws InterruptedException { } throw e; } - _processedMsgIds.put(_message.getMsgId(), _message.getMsgId()); + _processedMsgIds.put(_message.getMsgIdString(), _message.getMsgIdString()); result.setSuccess(true); return result; } diff --git a/helix-core/src/test/java/org/apache/helix/mock/participant/ErrTransition.java b/helix-core/src/test/java/org/apache/helix/mock/participant/ErrTransition.java index 301cd62d29..0fddcbf571 100644 --- a/helix-core/src/test/java/org/apache/helix/mock/participant/ErrTransition.java +++ b/helix-core/src/test/java/org/apache/helix/mock/participant/ErrTransition.java @@ -46,8 +46,8 @@ public ErrTransition(Map> errPartitions) { @Override public void doTransition(Message message, NotificationContext context) { - String fromState = message.getFromState(); - String toState = message.getToState(); + String fromState = message.getFromStateString(); + String toState = message.getToStateString(); String partition = message.getPartitionName(); String key = (fromState + "-" + toState).toUpperCase(); diff --git a/helix-core/src/test/java/org/apache/helix/tools/TestHelixAdminCli.java b/helix-core/src/test/java/org/apache/helix/tools/TestHelixAdminCli.java index c58f94d11b..1f28f4b37f 100644 --- a/helix-core/src/test/java/org/apache/helix/tools/TestHelixAdminCli.java +++ 
b/helix-core/src/test/java/org/apache/helix/tools/TestHelixAdminCli.java @@ -652,7 +652,7 @@ public void testInstanceGroupTags() throws Exception { IdealState dbIs = accessor.getProperty(accessor.keyBuilder().idealStates("db_11")); Set hosts = new HashSet(); - for (String p : dbIs.getPartitionSet()) { + for (String p : dbIs.getPartitionStringSet()) { for (String hostName : dbIs.getInstanceStateMap(p).keySet()) { InstanceConfig config = accessor.getProperty(accessor.keyBuilder().instanceConfig(hostName)); @@ -674,7 +674,7 @@ public void testInstanceGroupTags() throws Exception { dbIs = accessor.getProperty(accessor.keyBuilder().idealStates("db_11")); hosts = new HashSet(); - for (String p : dbIs.getPartitionSet()) { + for (String p : dbIs.getPartitionStringSet()) { for (String hostName : dbIs.getInstanceStateMap(p).keySet()) { InstanceConfig config = accessor.getProperty(accessor.keyBuilder().instanceConfig(hostName)); @@ -702,7 +702,7 @@ public void testInstanceGroupTags() throws Exception { ClusterSetup.processCommandLineArgs(command.split("\\s+")); dbIs = accessor.getProperty(accessor.keyBuilder().idealStates("db_11")); hosts = new HashSet(); - for (String p : dbIs.getPartitionSet()) { + for (String p : dbIs.getPartitionStringSet()) { for (String hostName : dbIs.getInstanceStateMap(p).keySet()) { InstanceConfig config = accessor.getProperty(accessor.keyBuilder().instanceConfig(hostName)); diff --git a/helix-examples/src/main/config/log4j.properties b/helix-examples/src/main/config/log4j.properties new file mode 100644 index 0000000000..4b3dc31577 --- /dev/null +++ b/helix-examples/src/main/config/log4j.properties @@ -0,0 +1,31 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +# Set root logger level to DEBUG and its only appender to A1. +log4j.rootLogger=ERROR,A1 + +# A1 is set to be a ConsoleAppender. +log4j.appender.A1=org.apache.log4j.ConsoleAppender + +# A1 uses PatternLayout. +log4j.appender.A1.layout=org.apache.log4j.PatternLayout +log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n + +log4j.logger.org.I0Itec=ERROR +log4j.logger.org.apache=ERROR diff --git a/helix-examples/src/main/java/org/apache/helix/examples/MasterSlaveStateModelFactory.java b/helix-examples/src/main/java/org/apache/helix/examples/MasterSlaveStateModelFactory.java index affbea8b72..b5d604cbd7 100644 --- a/helix-examples/src/main/java/org/apache/helix/examples/MasterSlaveStateModelFactory.java +++ b/helix-examples/src/main/java/org/apache/helix/examples/MasterSlaveStateModelFactory.java @@ -75,8 +75,8 @@ public void setInstanceName(String instanceName) { public void onBecomeSlaveFromOffline(Message message, NotificationContext context) { - System.out.println(_instanceName + " transitioning from " + message.getFromState() + " to " - + message.getToState() + " for " + partitionName); + System.out.println(_instanceName + " transitioning from " + message.getFromStateString() + " to " + + message.getToStateString() + " for " + partitionName); sleep(); } @@ -89,22 +89,22 @@ private void sleep() { } public void onBecomeSlaveFromMaster(Message message, NotificationContext 
context) { - System.out.println(_instanceName + " transitioning from " + message.getFromState() + " to " - + message.getToState() + " for " + partitionName); + System.out.println(_instanceName + " transitioning from " + message.getFromStateString() + " to " + + message.getToStateString() + " for " + partitionName); sleep(); } public void onBecomeMasterFromSlave(Message message, NotificationContext context) { - System.out.println(_instanceName + " transitioning from " + message.getFromState() + " to " - + message.getToState() + " for " + partitionName); + System.out.println(_instanceName + " transitioning from " + message.getFromStateString() + " to " + + message.getToStateString() + " for " + partitionName); sleep(); } public void onBecomeOfflineFromSlave(Message message, NotificationContext context) { - System.out.println(_instanceName + " transitioning from " + message.getFromState() + " to " - + message.getToState() + " for " + partitionName); + System.out.println(_instanceName + " transitioning from " + message.getFromStateString() + " to " + + message.getToStateString() + " for " + partitionName); sleep(); } diff --git a/helix-examples/src/main/java/org/apache/helix/examples/Quickstart.java b/helix-examples/src/main/java/org/apache/helix/examples/Quickstart.java index b80d458b58..0e12fc8c6c 100644 --- a/helix-examples/src/main/java/org/apache/helix/examples/Quickstart.java +++ b/helix-examples/src/main/java/org/apache/helix/examples/Quickstart.java @@ -210,7 +210,7 @@ private static void stopNode() { private static void printState(String msg) { System.out.println("CLUSTER STATE: " + msg); ExternalView resourceExternalView = admin.getResourceExternalView(CLUSTER_NAME, RESOURCE_NAME); - TreeSet sortedSet = new TreeSet(resourceExternalView.getPartitionSet()); + TreeSet sortedSet = new TreeSet(resourceExternalView.getPartitionStringSet()); StringBuilder sb = new StringBuilder("\t\t"); for (int i = 0; i < NUM_NODES; i++) { 
sb.append(INSTANCE_CONFIG_LIST.get(i).getInstanceName()).append("\t"); diff --git a/recipes/distributed-lock-manager/src/main/java/org/apache/helix/lockmanager/LockManagerDemo.java b/recipes/distributed-lock-manager/src/main/java/org/apache/helix/lockmanager/LockManagerDemo.java index b6c54dbaa6..da913eb0bd 100644 --- a/recipes/distributed-lock-manager/src/main/java/org/apache/helix/lockmanager/LockManagerDemo.java +++ b/recipes/distributed-lock-manager/src/main/java/org/apache/helix/lockmanager/LockManagerDemo.java @@ -115,7 +115,7 @@ public void run() { private static void printStatus(HelixAdmin admin, String cluster, String resource) { ExternalView externalView = admin.getResourceExternalView(cluster, resource); // System.out.println(externalView); - TreeSet treeSet = new TreeSet(externalView.getPartitionSet()); + TreeSet treeSet = new TreeSet(externalView.getPartitionStringSet()); System.out.println("lockName" + "\t" + "acquired By"); System.out.println("======================================"); for (String lockName : treeSet) { diff --git a/recipes/rsync-replicated-file-system/src/main/java/org/apache/helix/filestore/FileStoreStateModel.java b/recipes/rsync-replicated-file-system/src/main/java/org/apache/helix/filestore/FileStoreStateModel.java index 6809a8767e..62d0370a36 100644 --- a/recipes/rsync-replicated-file-system/src/main/java/org/apache/helix/filestore/FileStoreStateModel.java +++ b/recipes/rsync-replicated-file-system/src/main/java/org/apache/helix/filestore/FileStoreStateModel.java @@ -124,12 +124,12 @@ public FileStoreStateModel(HelixManager manager, String resource, String partiti @Transition(from = "OFFLINE", to = "SLAVE") public void onBecomeSlaveFromOffline(Message message, NotificationContext context) throws Exception { - System.out.println(_serverId + " transitioning from " + message.getFromState() + " to " - + message.getToState() + " for " + _partition); + System.out.println(_serverId + " transitioning from " + message.getFromStateString() 
+ " to " + + message.getToStateString() + " for " + _partition); replicator.start(); - System.out.println(_serverId + " transitioned from " + message.getFromState() + " to " - + message.getToState() + " for " + _partition); + System.out.println(_serverId + " transitioned from " + message.getFromStateString() + " to " + + message.getToStateString() + " for " + _partition); } /** @@ -143,8 +143,8 @@ public void onBecomeSlaveFromOffline(Message message, NotificationContext contex public void onBecomeMasterFromSlave(final Message message, NotificationContext context) throws Exception { replicator.stop(); - System.out.println(_serverId + " transitioning from " + message.getFromState() + " to " - + message.getToState() + " for " + _partition); + System.out.println(_serverId + " transitioning from " + message.getFromStateString() + " to " + + message.getToStateString() + " for " + _partition); ZkHelixPropertyStore helixPropertyStore = context.getManager().getHelixPropertyStore(); String checkpointDirPath = instanceConfig.getRecord().getSimpleField("check_point_dir"); @@ -168,8 +168,8 @@ public void onBecomeMasterFromSlave(final Message message, NotificationContext c long now = System.currentTimeMillis(); service = new FileSystemWatchService(fileStoreDir, now, generator); service.start(); - System.out.println(_serverId + " transitioned from " + message.getFromState() + " to " - + message.getToState() + " for " + _partition); + System.out.println(_serverId + " transitioned from " + message.getFromStateString() + " to " + + message.getToStateString() + " for " + _partition); } /** @@ -183,16 +183,16 @@ public void onBecomeMasterFromSlave(final Message message, NotificationContext c public void onBecomeSlaveFromMaster(Message message, NotificationContext context) throws Exception { service.stop(); - LOG.info(_serverId + " transitioning from " + message.getFromState() + " to " - + message.getToState() + " for " + _partition); + LOG.info(_serverId + " transitioning from " + 
message.getFromStateString() + " to " + + message.getToStateString() + " for " + _partition); replicator.start(); } @Transition(from = "SLAVE", to = "OFFLINE") public void onBecomeOfflineFromSlave(Message message, NotificationContext context) { replicator.stop(); - LOG.info(_serverId + " transitioning from " + message.getFromState() + " to " - + message.getToState() + " for " + _partition); + LOG.info(_serverId + " transitioning from " + message.getFromStateString() + " to " + + message.getToStateString() + " for " + _partition); } public void onBecomeDroppedFromOffline(Message message, NotificationContext context) { diff --git a/recipes/task-execution/src/main/java/org/apache/helix/taskexecution/Task.java b/recipes/task-execution/src/main/java/org/apache/helix/taskexecution/Task.java index 0cc8bba6af..6030186306 100644 --- a/recipes/task-execution/src/main/java/org/apache/helix/taskexecution/Task.java +++ b/recipes/task-execution/src/main/java/org/apache/helix/taskexecution/Task.java @@ -80,7 +80,7 @@ private boolean areParentTasksDone(List externalViewList) { } private boolean isParentTaskDone(ExternalView ev) { - Set partitionSet = ev.getPartitionSet(); + Set partitionSet = ev.getPartitionStringSet(); if (partitionSet.isEmpty()) { return false; } From becfda1c3a5014fe6da67b280885909841e4953a Mon Sep 17 00:00:00 2001 From: zzhang Date: Thu, 29 Aug 2013 15:33:58 -0700 Subject: [PATCH 009/113] [HELIX-109] Review Helix model package, more accessor changes, rb=13878 --- .../java/org/apache/helix/PropertyKey.java | 2 +- .../org/apache/helix/api/ClusterAccessor.java | 46 +++-- .../apache/helix/api/ControllerAccessor.java | 2 +- .../apache/helix/api/ParticipantAccessor.java | 161 ++++++++++++------ .../apache/helix/api/ResourceAccessor.java | 20 ++- .../stages/ExternalViewComputeStage.java | 2 +- ...DefaultSchedulerMessageHandlerFactory.java | 4 +- .../apache/helix/manager/zk/ZKHelixAdmin.java | 10 +- .../participant/HelixCustomCodeRunner.java | 2 +- 
.../org/apache/helix/tools/ClusterSetup.java | 2 +- .../java/org/apache/helix/TestZKCallback.java | 2 +- .../java/org/apache/helix/ZkUnitTestBase.java | 4 +- .../controller/stages/BaseStageTest.java | 2 +- ...estBestPossibleCalcStageCompatibility.java | 2 +- .../stages/TestCompatibilityCheckStage.java | 2 +- .../stages/TestResourceComputationStage.java | 4 +- .../TestAddStateModelFactoryAfterConnect.java | 4 +- .../helix/integration/TestAutoRebalance.java | 2 +- .../TestAutoRebalancePartitionLimit.java | 2 +- .../helix/integration/TestBatchMessage.java | 12 +- .../integration/TestBatchMessageWrapper.java | 4 +- .../integration/TestBucketizedResource.java | 4 +- .../TestCustomizedIdealStateRebalancer.java | 4 +- .../apache/helix/integration/TestDisable.java | 8 +- .../apache/helix/integration/TestDrop.java | 2 +- .../integration/TestRenamePartition.java | 8 +- .../helix/integration/TestSchemataSM.java | 2 +- .../helix/integration/TestSwapInstance.java | 8 +- .../manager/zk/TestZNRecordSizeLimit.java | 22 +-- .../helix/manager/zk/TestZkHelixAdmin.java | 4 +- .../mbeans/TestResourceMonitor.java | 2 +- .../apache/helix/tools/TestHelixAdminCli.java | 10 +- 32 files changed, 222 insertions(+), 143 deletions(-) diff --git a/helix-core/src/main/java/org/apache/helix/PropertyKey.java b/helix-core/src/main/java/org/apache/helix/PropertyKey.java index 2f2031963c..0d276d1922 100644 --- a/helix-core/src/main/java/org/apache/helix/PropertyKey.java +++ b/helix-core/src/main/java/org/apache/helix/PropertyKey.java @@ -156,7 +156,7 @@ public PropertyKey idealStates() { * @param resourceName * @return {@link PropertyKey} */ - public PropertyKey idealStates(String resourceName) { + public PropertyKey idealState(String resourceName) { return new PropertyKey(IDEALSTATES, IdealState.class, _clusterName, resourceName); } diff --git a/helix-core/src/main/java/org/apache/helix/api/ClusterAccessor.java b/helix-core/src/main/java/org/apache/helix/api/ClusterAccessor.java index 
ac3b7153f2..5902a24925 100644 --- a/helix-core/src/main/java/org/apache/helix/api/ClusterAccessor.java +++ b/helix-core/src/main/java/org/apache/helix/api/ClusterAccessor.java @@ -33,8 +33,11 @@ import org.apache.helix.model.LiveInstance; import org.apache.helix.model.Message; import org.apache.helix.model.PauseSignal; +import org.apache.log4j.Logger; public class ClusterAccessor { + private static Logger LOG = Logger.getLogger(ClusterAccessor.class); + private final HelixDataAccessor _accessor; private final PropertyKey.Builder _keyBuilder; private final ClusterId _clusterId; @@ -66,6 +69,27 @@ public void createCluster() { } } + /** + * drop a cluster + */ + public void dropCluster() { + LOG.info("Dropping cluster: " + _clusterId); + List liveInstanceNames =_accessor.getChildNames(_keyBuilder.liveInstances()); + if (liveInstanceNames.size() > 0) { + throw new HelixException("Can't drop cluster: " + _clusterId + + " because there are running participant: " + liveInstanceNames + + ", shutdown participants first."); + } + + LiveInstance leader = _accessor.getProperty(_keyBuilder.controllerLeader()); + if (leader != null) { + throw new HelixException("Can't drop cluster: " + _clusterId + ", because leader: " + + leader.getId() + " are running, shutdown leader first."); + } + + // TODO remove cluster structure from zookeeper + } + /** * read entire cluster data * @return cluster @@ -103,14 +127,14 @@ public Cluster readCluster() { */ Map> currentStateMap = new HashMap>(); - for (String participantId : liveInstanceMap.keySet()) { - LiveInstance liveInstance = liveInstanceMap.get(participantId); + for (String participantName : liveInstanceMap.keySet()) { + LiveInstance liveInstance = liveInstanceMap.get(participantName); SessionId sessionId = liveInstance.getSessionId(); Map instanceCurStateMap = - _accessor.getChildValuesMap(_keyBuilder.currentStates(participantId, + _accessor.getChildValuesMap(_keyBuilder.currentStates(participantName, sessionId.stringify())); - 
currentStateMap.put(participantId, instanceCurStateMap); + currentStateMap.put(participantName, instanceCurStateMap); } LiveInstance leader = _accessor.getProperty(_keyBuilder.controllerLeader()); @@ -130,12 +154,10 @@ public Cluster readCluster() { LiveInstance liveInstance = liveInstanceMap.get(participantName); Map instanceMsgMap = messageMap.get(participantName); - // TODO pass current-state map ParticipantId participantId = new ParticipantId(participantName); - // TODO construct participant - participantMap.put(participantId, new Participant(participantId, null, -1, false, null, null, - null, null, null)); + participantMap.put(participantId, ParticipantAccessor.createParticipant(participantId, + instanceConfig, liveInstance, instanceMsgMap, currentStateMap.get(participantName))); } Map controllerMap = new HashMap(); @@ -174,13 +196,13 @@ public void addResourceToCluster(Resource resource) { } ResourceId resourceId = resource.getId(); - if (_accessor.getProperty(_keyBuilder.idealStates(resourceId.stringify())) != null) { + if (_accessor.getProperty(_keyBuilder.idealState(resourceId.stringify())) != null) { throw new HelixException("Skip adding resource: " + resourceId - + " . 
Resource ideal state already exists in cluster: " + _clusterId); + + ", because resource ideal state already exists in cluster: " + _clusterId); } // TODO convert rebalancerConfig to idealState - _accessor.createProperty(_keyBuilder.idealStates(resourceId.stringify()), null); + _accessor.createProperty(_keyBuilder.idealState(resourceId.stringify()), null); } /** @@ -189,7 +211,7 @@ public void addResourceToCluster(Resource resource) { */ public void dropResourceFromCluster(ResourceId resourceId) { // TODO check existence - _accessor.removeProperty(_keyBuilder.idealStates(resourceId.stringify())); + _accessor.removeProperty(_keyBuilder.idealState(resourceId.stringify())); _accessor.removeProperty(_keyBuilder.resourceConfig(resourceId.stringify())); } diff --git a/helix-core/src/main/java/org/apache/helix/api/ControllerAccessor.java b/helix-core/src/main/java/org/apache/helix/api/ControllerAccessor.java index 3a7b3b6e0f..153bab661e 100644 --- a/helix-core/src/main/java/org/apache/helix/api/ControllerAccessor.java +++ b/helix-core/src/main/java/org/apache/helix/api/ControllerAccessor.java @@ -32,7 +32,7 @@ public ControllerAccessor(HelixDataAccessor accessor) { * create leader * @param controllerId */ - public void start(ControllerId controllerId) { + public void connectLeader(ControllerId controllerId) { // TODO impl this } } diff --git a/helix-core/src/main/java/org/apache/helix/api/ParticipantAccessor.java b/helix-core/src/main/java/org/apache/helix/api/ParticipantAccessor.java index 9de2c5a43b..58d18dfc2f 100644 --- a/helix-core/src/main/java/org/apache/helix/api/ParticipantAccessor.java +++ b/helix-core/src/main/java/org/apache/helix/api/ParticipantAccessor.java @@ -57,23 +57,26 @@ public ParticipantAccessor(ClusterId clusterId, HelixDataAccessor accessor) { } /** + * enable/disable a participant * @param participantId * @param isEnabled */ void enableParticipant(ParticipantId participantId, boolean isEnabled) { - if 
(_accessor.getProperty(_keyBuilder.instanceConfig(participantId.stringify())) == null) { + String participantName = participantId.stringify(); + if (_accessor.getProperty(_keyBuilder.instanceConfig(participantName)) == null) { throw new HelixException("Config for participant: " + participantId + " does NOT exist in cluster: " + _clusterId); } - InstanceConfig config = new InstanceConfig(participantId.stringify()); + InstanceConfig config = new InstanceConfig(participantName); config.setInstanceEnabled(isEnabled); - _accessor.updateProperty(_keyBuilder.instanceConfig(participantId.stringify()), config); + _accessor.updateProperty(_keyBuilder.instanceConfig(participantName), config); } /** * disable participant + * @param participantId */ public void disableParticipant(ParticipantId participantId) { enableParticipant(participantId, false); @@ -81,20 +84,23 @@ public void disableParticipant(ParticipantId participantId) { /** * enable participant + * @param participantId */ public void enableParticipant(ParticipantId participantId) { - enableParticipant(participantId, false); + enableParticipant(participantId, true); } /** * create messages for participant - * @param msgs + * @param participantId + * @param msgMap map of message-id to message */ - public void insertMessagesToParticipant(ParticipantId participantId, Map msgMap) { + public void sendMessagesToParticipant(ParticipantId participantId, Map msgMap) { + String participantName = participantId.stringify(); List msgKeys = new ArrayList(); List msgs = new ArrayList(); for (MessageId msgId : msgMap.keySet()) { - msgKeys.add(_keyBuilder.message(participantId.stringify(), msgId.stringify())); + msgKeys.add(_keyBuilder.message(participantName, msgId.stringify())); msgs.add(msgMap.get(msgId)); } @@ -103,13 +109,15 @@ public void insertMessagesToParticipant(ParticipantId participantId, Map msgMap) { + public void updateMessageStatus(ParticipantId participantId, Map msgMap) { + String participantName = 
participantId.stringify(); List msgKeys = new ArrayList(); List msgs = new ArrayList(); for (MessageId msgId : msgMap.keySet()) { - msgKeys.add(_keyBuilder.message(participantId.stringify(), msgId.stringify())); + msgKeys.add(_keyBuilder.message(participantName, msgId.stringify())); msgs.add(msgMap.get(msgId)); } _accessor.setChildren(msgKeys, msgs); @@ -117,12 +125,14 @@ public void setMessagesOfParticipant(ParticipantId participantId, Map msgIdSet) { + String participantName = participantId.stringify(); List msgKeys = new ArrayList(); for (MessageId msgId : msgIdSet) { - msgKeys.add(_keyBuilder.message(participantId.stringify(), msgId.stringify())); + msgKeys.add(_keyBuilder.message(participantName, msgId.stringify())); } // TODO impl batch remove @@ -132,6 +142,7 @@ public void deleteMessagesFromParticipant(ParticipantId participantId, Set partitionIdSet) { + String participantName = participantId.stringify(); + String resourceName = resourceId.stringify(); + // check instanceConfig exists - PropertyKey instanceConfigKey = _keyBuilder.instanceConfig(participantId.stringify()); + PropertyKey instanceConfigKey = _keyBuilder.instanceConfig(participantName); if (_accessor.getProperty(instanceConfigKey) == null) { throw new HelixException("Config for participant: " + participantId + " does NOT exist in cluster: " + _clusterId); } // check resource exist. warn if not - IdealState idealState = _accessor.getProperty(_keyBuilder.idealStates(resourceId.stringify())); + IdealState idealState = _accessor.getProperty(_keyBuilder.idealState(resourceName)); if (idealState == null) { LOG.warn("Disable partitions: " + partitionIdSet + " but Cluster: " + _clusterId + ", resource: " + resourceId @@ -156,12 +170,13 @@ void enablePartitionsForParticipant(final boolean enabled, final ParticipantId p } else { // check partitions exist. 
warn if not for (PartitionId partitionId : partitionIdSet) { + String partitionName = partitionId.stringify(); if ((idealState.getRebalanceMode() == RebalanceMode.SEMI_AUTO && idealState - .getPreferenceList(partitionId.stringify()) == null) + .getPreferenceList(partitionName) == null) || (idealState.getRebalanceMode() == RebalanceMode.CUSTOMIZED && idealState - .getInstanceStateMap(partitionId.stringify()) == null)) { + .getInstanceStateMap(partitionName) == null)) { LOG.warn("Cluster: " + _clusterId + ", resource: " + resourceId + ", partition: " - + partitionId + ", partition does not exist in ideal state"); + + partitionId + ", partition does NOT exist in ideal state"); } } } @@ -174,6 +189,7 @@ void enablePartitionsForParticipant(final boolean enabled, final ParticipantId p for (PartitionId partitionId : partitionIdSet) { partitionNames.add(partitionId.stringify()); } + baseAccessor.update(instanceConfigKey.getPath(), new DataUpdater() { @Override public ZNRecord update(ZNRecord currentData) { @@ -205,7 +221,10 @@ public ZNRecord update(ZNRecord currentData) { } /** - * @param disablePartitionSet + * disable partitions on a participant + * @param participantId + * @param resourceId + * @param disablePartitionIdSet */ public void disablePartitionsForParticipant(ParticipantId participantId, ResourceId resourceId, Set disablePartitionIdSet) { @@ -213,7 +232,10 @@ public void disablePartitionsForParticipant(ParticipantId participantId, Resourc } /** - * @param enablePartitionSet + * enable partitions on a participant + * @param participantId + * @param resourceId + * @param enablePartitionIdSet */ public void enablePartitionsForParticipant(ParticipantId participantId, ResourceId resourceId, Set enablePartitionIdSet) { @@ -221,36 +243,37 @@ public void enablePartitionsForParticipant(ParticipantId participantId, Resource } /** - * create live instance for the participant + * reset partitions on a participant * @param participantId + * @param resourceId + * @param 
resetPartitionIdSet */ - public void startParticipant(ParticipantId participantId) { + public void resetPartitionsForParticipant(ParticipantId participantId, ResourceId resourceId, + Set resetPartitionIdSet) { // TODO impl this } /** - * read participant related data + * create live instance for the participant * @param participantId - * @return */ - public Participant readParticipant(ParticipantId participantId) { - // read physical model - String participantName = participantId.stringify(); - InstanceConfig instanceConfig = _accessor.getProperty(_keyBuilder.instance(participantName)); - LiveInstance liveInstance = _accessor.getProperty(_keyBuilder.liveInstance(participantName)); - - Map instanceMsgMap = Collections.emptyMap(); - Map instanceCurStateMap = Collections.emptyMap(); - if (liveInstance != null) { - SessionId sessionId = liveInstance.getSessionId(); + public void connectParticipant(ParticipantId participantId) { + // TODO impl this + } - instanceMsgMap = _accessor.getChildValuesMap(_keyBuilder.messages(participantName)); - instanceCurStateMap = - _accessor.getChildValuesMap(_keyBuilder.currentStates(participantName, - sessionId.stringify())); - } + /** + * create a participant based on physical model + * @param participantId + * @param instanceConfig + * @param liveInstance + * @param instanceMsgMap map of message-id to message + * @param instanceCurStateMap map of resource-id to current-state + * @return participant + */ + static Participant createParticipant(ParticipantId participantId, InstanceConfig instanceConfig, + LiveInstance liveInstance, Map instanceMsgMap, + Map instanceCurStateMap) { - // convert to logical model String hostName = instanceConfig.getHostName(); int port = -1; @@ -265,10 +288,8 @@ public Participant readParticipant(ParticipantId participantId) { boolean isEnabled = instanceConfig.getInstanceEnabled(); List disabledPartitions = instanceConfig.getDisabledPartitions(); - Set disabledPartitionIdSet; - if (disabledPartitions == 
null) { - disabledPartitionIdSet = Collections.emptySet(); - } else { + Set disabledPartitionIdSet = Collections.emptySet(); + if (disabledPartitions != null) { disabledPartitionIdSet = new HashSet(); for (String partitionId : disabledPartitions) { disabledPartitionIdSet.add(new PartitionId(PartitionId.extractResourceId(partitionId), @@ -291,26 +312,53 @@ public Participant readParticipant(ParticipantId participantId) { msgMap.put(new MessageId(msgId), message); } - // TODO convert current state - // Map curStateMap = new HashMap(); - // if (currentStateMap != null) { - // for (String participantId : currentStateMap.keySet()) { - // CurState curState = - // new CurState(_id, new ParticipantId(participantId), currentStateMap.get(participantId)); - // curStateMap.put(new ParticipantId(participantId), curState); - // } - // } + Map curStateMap = new HashMap(); + if (instanceCurStateMap != null) { + + for (String resourceName : instanceCurStateMap.keySet()) { + curStateMap.put(new ResourceId(resourceName), instanceCurStateMap.get(resourceName)); + } + } return new Participant(participantId, hostName, port, isEnabled, disabledPartitionIdSet, tags, runningInstance, null, msgMap); } + /** + * read participant related data + * @param participantId + * @return participant + */ + public Participant readParticipant(ParticipantId participantId) { + // read physical model + String participantName = participantId.stringify(); + InstanceConfig instanceConfig = _accessor.getProperty(_keyBuilder.instance(participantName)); + LiveInstance liveInstance = _accessor.getProperty(_keyBuilder.liveInstance(participantName)); + + Map instanceMsgMap = Collections.emptyMap(); + Map instanceCurStateMap = Collections.emptyMap(); + if (liveInstance != null) { + SessionId sessionId = liveInstance.getSessionId(); + + instanceMsgMap = _accessor.getChildValuesMap(_keyBuilder.messages(participantName)); + instanceCurStateMap = + _accessor.getChildValuesMap(_keyBuilder.currentStates(participantName, + 
sessionId.stringify())); + } + + return createParticipant(participantId, instanceConfig, liveInstance, instanceMsgMap, + instanceCurStateMap); + } + /** * update resource current state of a participant + * @param resourceId resource id + * @param participantId participant id + * @param sessionId session id * @param curStateUpdate current state change delta */ - public void updateParticipantCurrentState(ParticipantId participantId, SessionId sessionId, - ResourceId resourceId, CurrentState curStateUpdate) { + public void updateCurrentState(ResourceId resourceId, ParticipantId participantId, + SessionId sessionId, CurrentState curStateUpdate) { _accessor.updateProperty( _keyBuilder.currentState(participantId.stringify(), sessionId.stringify(), resourceId.stringify()), curStateUpdate); @@ -318,9 +366,12 @@ public void updateParticipantCurrentState(ParticipantId participantId, SessionId /** * drop resource current state of a participant + * @param resourceId resource id + * @param participantId participant id + * @param sessionId session id */ - public void dropParticipantCurrentState(ParticipantId participantId, SessionId sessionId, - ResourceId resourceId) { + public void dropCurrentState(ResourceId resourceId, ParticipantId participantId, + SessionId sessionId) { _accessor.removeProperty(_keyBuilder.currentState(participantId.stringify(), sessionId.stringify(), resourceId.stringify())); } diff --git a/helix-core/src/main/java/org/apache/helix/api/ResourceAccessor.java b/helix-core/src/main/java/org/apache/helix/api/ResourceAccessor.java index c0757b41b1..a3d254303b 100644 --- a/helix-core/src/main/java/org/apache/helix/api/ResourceAccessor.java +++ b/helix-core/src/main/java/org/apache/helix/api/ResourceAccessor.java @@ -37,30 +37,36 @@ public ResourceAccessor(ClusterId clusterId, HelixDataAccessor accessor) { /** * save resource assignment + * @param resourceId + * @param resourceAssignment */ - public void setRresourceAssignment(ResourceId resourceId, 
RscAssignment resourceAssignment) { + public void setResourceAssignment(ResourceId resourceId, RscAssignment resourceAssignment) { // TODO impl this } /** * set ideal-state + * @param resourceId + * @param idealState */ - public void setResourceIdealState(ResourceId resourceId, IdealState idealState) { - _accessor.setProperty(_keyBuilder.idealStates(resourceId.stringify()), idealState); + public void setIdealState(ResourceId resourceId, IdealState idealState) { + _accessor.setProperty(_keyBuilder.idealState(resourceId.stringify()), idealState); } /** * set external view of a resource + * @param resourceId * @param extView */ - public void setResourceExternalView(ResourceId resourceId, ExternalView extView) { - _accessor.setProperty(_keyBuilder.idealStates(resourceId.stringify()), extView); + public void setExternalView(ResourceId resourceId, ExternalView extView) { + _accessor.setProperty(_keyBuilder.externalView(resourceId.stringify()), extView); } /** * drop external view of a resource + * @param resourceId */ - public void dropResourceExternalView(ResourceId resourceId) { - _accessor.removeProperty(_keyBuilder.idealStates(resourceId.stringify())); + public void dropExternalView(ResourceId resourceId) { + _accessor.removeProperty(_keyBuilder.externalView(resourceId.stringify())); } } diff --git a/helix-core/src/main/java/org/apache/helix/controller/stages/ExternalViewComputeStage.java b/helix-core/src/main/java/org/apache/helix/controller/stages/ExternalViewComputeStage.java index d50bd9ec41..74e039fc7e 100644 --- a/helix-core/src/main/java/org/apache/helix/controller/stages/ExternalViewComputeStage.java +++ b/helix-core/src/main/java/org/apache/helix/controller/stages/ExternalViewComputeStage.java @@ -260,7 +260,7 @@ private void updateScheduledTaskStatus(ExternalView ev, HelixManager manager, // Remove the finished (COMPLETED or ERROR) tasks from the SCHEDULER_TASK_RESOURCE idealstate keyBuilder = accessor.keyBuilder(); - 
accessor.updateProperty(keyBuilder.idealStates(taskQueueIdealState.getResourceName()), delta); + accessor.updateProperty(keyBuilder.idealState(taskQueueIdealState.getResourceName()), delta); } } diff --git a/helix-core/src/main/java/org/apache/helix/manager/zk/DefaultSchedulerMessageHandlerFactory.java b/helix-core/src/main/java/org/apache/helix/manager/zk/DefaultSchedulerMessageHandlerFactory.java index a7fcc10367..020c99dbd1 100644 --- a/helix-core/src/main/java/org/apache/helix/manager/zk/DefaultSchedulerMessageHandlerFactory.java +++ b/helix-core/src/main/java/org/apache/helix/manager/zk/DefaultSchedulerMessageHandlerFactory.java @@ -186,7 +186,7 @@ void handleMessageUsingScheduledTaskQueue(Criteria recipientCriteria, Message me int existingTopPartitionId = 0; IdealState currentTaskQueue = _manager.getHelixDataAccessor().getProperty( - accessor.keyBuilder().idealStates(newAddedScheduledTasks.getId())); + accessor.keyBuilder().idealState(newAddedScheduledTasks.getId())); if (currentTaskQueue != null) { existingTopPartitionId = findTopPartitionId(currentTaskQueue) + 1; } @@ -213,7 +213,7 @@ void handleMessageUsingScheduledTaskQueue(Criteria recipientCriteria, Message me } } _manager.getHelixDataAccessor().updateProperty( - accessor.keyBuilder().idealStates(newAddedScheduledTasks.getId()), + accessor.keyBuilder().idealState(newAddedScheduledTasks.getId()), newAddedScheduledTasks); sendSummary.put("MessageCount", "" + taskMessages.size()); } diff --git a/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixAdmin.java b/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixAdmin.java index 8172c61050..7c898acc29 100644 --- a/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixAdmin.java +++ b/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixAdmin.java @@ -303,7 +303,7 @@ public void resetPartition(String clusterName, String instanceName, String resou } // check resource group exists - IdealState idealState = 
accessor.getProperty(keyBuilder.idealStates(resourceName)); + IdealState idealState = accessor.getProperty(keyBuilder.idealState(resourceName)); if (idealState == null) { throw new HelixException("Can't reset state for " + resourceName + "/" + partitionNames + " on " + instanceName + ", because " + resourceName + " is not added"); @@ -659,7 +659,7 @@ public IdealState getResourceIdealState(String clusterName, String resourceName) new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor(_zkClient)); Builder keyBuilder = accessor.keyBuilder(); - return accessor.getProperty(keyBuilder.idealStates(resourceName)); + return accessor.getProperty(keyBuilder.idealState(resourceName)); } @Override @@ -668,7 +668,7 @@ public void setResourceIdealState(String clusterName, String resourceName, Ideal new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor(_zkClient)); Builder keyBuilder = accessor.keyBuilder(); - accessor.setProperty(keyBuilder.idealStates(resourceName), idealState); + accessor.setProperty(keyBuilder.idealState(resourceName), idealState); } @Override @@ -704,7 +704,7 @@ public void dropResource(String clusterName, String resourceName) { new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor(_zkClient)); Builder keyBuilder = accessor.keyBuilder(); - accessor.removeProperty(keyBuilder.idealStates(resourceName)); + accessor.removeProperty(keyBuilder.idealState(resourceName)); accessor.removeProperty(keyBuilder.resourceConfig(resourceName)); } @@ -910,7 +910,7 @@ public void addClusterToGrandCluster(String clusterName, String grandCluster) { new ZKHelixDataAccessor(grandCluster, new ZkBaseDataAccessor(_zkClient)); Builder keyBuilder = accessor.keyBuilder(); - accessor.setProperty(keyBuilder.idealStates(idealState.getResourceName()), idealState); + accessor.setProperty(keyBuilder.idealState(idealState.getResourceName()), idealState); } @Override diff --git a/helix-core/src/main/java/org/apache/helix/participant/HelixCustomCodeRunner.java 
b/helix-core/src/main/java/org/apache/helix/participant/HelixCustomCodeRunner.java index 6a2490ada4..82488edfca 100644 --- a/helix-core/src/main/java/org/apache/helix/participant/HelixCustomCodeRunner.java +++ b/helix-core/src/main/java/org/apache/helix/participant/HelixCustomCodeRunner.java @@ -142,7 +142,7 @@ public void start() throws Exception { List idealStates = accessor.getChildNames(keyBuilder.idealStates()); while (idealStates == null || !idealStates.contains(_resourceName)) { - accessor.setProperty(keyBuilder.idealStates(_resourceName), idealState); + accessor.setProperty(keyBuilder.idealState(_resourceName), idealState); idealStates = accessor.getChildNames(keyBuilder.idealStates()); } diff --git a/helix-core/src/main/java/org/apache/helix/tools/ClusterSetup.java b/helix-core/src/main/java/org/apache/helix/tools/ClusterSetup.java index a39e571963..ba417be527 100644 --- a/helix-core/src/main/java/org/apache/helix/tools/ClusterSetup.java +++ b/helix-core/src/main/java/org/apache/helix/tools/ClusterSetup.java @@ -310,7 +310,7 @@ public void swapInstance(String clusterName, String oldInstanceName, String newI accessor.getChildValues(accessor.keyBuilder().idealStates()); for (IdealState idealState : existingIdealStates) { swapInstanceInIdealState(idealState, oldInstanceName, newInstanceName); - accessor.setProperty(accessor.keyBuilder().idealStates(idealState.getResourceName()), + accessor.setProperty(accessor.keyBuilder().idealState(idealState.getResourceName()), idealState); } } diff --git a/helix-core/src/test/java/org/apache/helix/TestZKCallback.java b/helix-core/src/test/java/org/apache/helix/TestZKCallback.java index 74d2987711..72799f0e99 100644 --- a/helix-core/src/test/java/org/apache/helix/TestZKCallback.java +++ b/helix-core/src/test/java/org/apache/helix/TestZKCallback.java @@ -167,7 +167,7 @@ public void testInvocation() throws Exception { idealState.setNumPartitions(400); idealState.setReplicas(Integer.toString(2)); 
idealState.setStateModelDefRef("StateModeldef"); - accessor.setProperty(keyBuilder.idealStates("db-1234"), idealState); + accessor.setProperty(keyBuilder.idealState("db-1234"), idealState); Thread.sleep(100); AssertJUnit.assertTrue(testListener.idealStateChangeReceived); testListener.Reset(); diff --git a/helix-core/src/test/java/org/apache/helix/ZkUnitTestBase.java b/helix-core/src/test/java/org/apache/helix/ZkUnitTestBase.java index 58dd070d5b..4a505dcbac 100644 --- a/helix-core/src/test/java/org/apache/helix/ZkUnitTestBase.java +++ b/helix-core/src/test/java/org/apache/helix/ZkUnitTestBase.java @@ -176,7 +176,7 @@ public void verifyReplication(ZkClient zkClient, String clusterName, String reso new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor(zkClient)); Builder keyBuilder = accessor.keyBuilder(); - IdealState idealState = accessor.getProperty(keyBuilder.idealStates(resource)); + IdealState idealState = accessor.getProperty(keyBuilder.idealState(resource)); for (String partitionName : idealState.getPartitionStringSet()) { if (idealState.getRebalanceMode() == RebalanceMode.SEMI_AUTO) { AssertJUnit.assertEquals(repl, idealState.getPreferenceList(partitionName).size()); @@ -295,7 +295,7 @@ protected List setupIdealState(String clusterName, int[] nodes, Stri idealStates.add(idealState); // System.out.println(idealState); - accessor.setProperty(keyBuilder.idealStates(resourceName), idealState); + accessor.setProperty(keyBuilder.idealState(resourceName), idealState); } return idealStates; } diff --git a/helix-core/src/test/java/org/apache/helix/controller/stages/BaseStageTest.java b/helix-core/src/test/java/org/apache/helix/controller/stages/BaseStageTest.java index 64e378ed3e..6dcf72594e 100644 --- a/helix-core/src/test/java/org/apache/helix/controller/stages/BaseStageTest.java +++ b/helix-core/src/test/java/org/apache/helix/controller/stages/BaseStageTest.java @@ -99,7 +99,7 @@ protected List setupIdealState(int nodes, String[] resources, int pa Builder 
keyBuilder = accessor.keyBuilder(); - accessor.setProperty(keyBuilder.idealStates(resourceName), idealState); + accessor.setProperty(keyBuilder.idealState(resourceName), idealState); } return idealStates; } diff --git a/helix-core/src/test/java/org/apache/helix/controller/stages/TestBestPossibleCalcStageCompatibility.java b/helix-core/src/test/java/org/apache/helix/controller/stages/TestBestPossibleCalcStageCompatibility.java index 33570a074e..2453bd8ab4 100644 --- a/helix-core/src/test/java/org/apache/helix/controller/stages/TestBestPossibleCalcStageCompatibility.java +++ b/helix-core/src/test/java/org/apache/helix/controller/stages/TestBestPossibleCalcStageCompatibility.java @@ -133,7 +133,7 @@ protected List setupIdealStateDeprecated(int nodes, String[] resourc Builder keyBuilder = accessor.keyBuilder(); - accessor.setProperty(keyBuilder.idealStates(resourceName), idealState); + accessor.setProperty(keyBuilder.idealState(resourceName), idealState); } return idealStates; } diff --git a/helix-core/src/test/java/org/apache/helix/controller/stages/TestCompatibilityCheckStage.java b/helix-core/src/test/java/org/apache/helix/controller/stages/TestCompatibilityCheckStage.java index 0b97e205f5..bce7c2d65c 100644 --- a/helix-core/src/test/java/org/apache/helix/controller/stages/TestCompatibilityCheckStage.java +++ b/helix-core/src/test/java/org/apache/helix/controller/stages/TestCompatibilityCheckStage.java @@ -54,7 +54,7 @@ private void prepare(String controllerVersion, String participantVersion, idealState.setStateModelDefRef("MasterSlave"); Builder keyBuilder = accessor.keyBuilder(); - accessor.setProperty(keyBuilder.idealStates(resourceName), idealState); + accessor.setProperty(keyBuilder.idealState(resourceName), idealState); // set live instances record = new ZNRecord("localhost_0"); diff --git a/helix-core/src/test/java/org/apache/helix/controller/stages/TestResourceComputationStage.java 
b/helix-core/src/test/java/org/apache/helix/controller/stages/TestResourceComputationStage.java index 3f6fa757b3..b29663c5fc 100644 --- a/helix-core/src/test/java/org/apache/helix/controller/stages/TestResourceComputationStage.java +++ b/helix-core/src/test/java/org/apache/helix/controller/stages/TestResourceComputationStage.java @@ -60,7 +60,7 @@ public void testSimple() throws Exception { HelixDataAccessor accessor = manager.getHelixDataAccessor(); Builder keyBuilder = accessor.keyBuilder(); - accessor.setProperty(keyBuilder.idealStates(resourceName), idealState); + accessor.setProperty(keyBuilder.idealState(resourceName), idealState); ResourceComputationStage stage = new ResourceComputationStage(); runStage(event, new ReadClusterDataStage()); runStage(event, stage); @@ -125,7 +125,7 @@ public void testMultipleResourcesWithSomeDropped() throws Exception { HelixDataAccessor accessor = manager.getHelixDataAccessor(); Builder keyBuilder = accessor.keyBuilder(); - accessor.setProperty(keyBuilder.idealStates(resourceName), idealState); + accessor.setProperty(keyBuilder.idealState(resourceName), idealState); idealStates.add(idealState); } diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestAddStateModelFactoryAfterConnect.java b/helix-core/src/test/java/org/apache/helix/integration/TestAddStateModelFactoryAfterConnect.java index 33938ad027..0592883e9f 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestAddStateModelFactoryAfterConnect.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestAddStateModelFactoryAfterConnect.java @@ -86,9 +86,9 @@ public void testBasic() throws Exception { ZkBaseDataAccessor baseAccessor = new ZkBaseDataAccessor(_gZkClient); ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, baseAccessor); Builder keyBuilder = accessor.keyBuilder(); - IdealState idealState = accessor.getProperty(keyBuilder.idealStates("TestDB1")); + IdealState idealState = 
accessor.getProperty(keyBuilder.idealState("TestDB1")); idealState.setStateModelFactoryName("TestDB1_Factory"); - accessor.setProperty(keyBuilder.idealStates("TestDB1"), idealState); + accessor.setProperty(keyBuilder.idealState("TestDB1"), idealState); setupTool.rebalanceStorageCluster(clusterName, "TestDB1", 3); // assert that we have received OFFLINE->SLAVE messages for all partitions diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestAutoRebalance.java b/helix-core/src/test/java/org/apache/helix/integration/TestAutoRebalance.java index f873b0e9fd..747a18516c 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestAutoRebalance.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestAutoRebalance.java @@ -258,7 +258,7 @@ public boolean verify() { new ZKHelixDataAccessor(_clusterName, new ZkBaseDataAccessor(_client)); Builder keyBuilder = accessor.keyBuilder(); int numberOfPartitions = - accessor.getProperty(keyBuilder.idealStates(_resourceName)).getRecord().getListFields() + accessor.getProperty(keyBuilder.idealState(_resourceName)).getRecord().getListFields() .size(); ClusterDataCache cache = new ClusterDataCache(); cache.refresh(accessor); diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestAutoRebalancePartitionLimit.java b/helix-core/src/test/java/org/apache/helix/integration/TestAutoRebalancePartitionLimit.java index 2d16e61535..3497290192 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestAutoRebalancePartitionLimit.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestAutoRebalancePartitionLimit.java @@ -225,7 +225,7 @@ public boolean verify() { new ZKHelixDataAccessor(_clusterName, new ZkBaseDataAccessor(_client)); Builder keyBuilder = accessor.keyBuilder(); int numberOfPartitions = - accessor.getProperty(keyBuilder.idealStates(_resourceName)).getRecord().getListFields() + 
accessor.getProperty(keyBuilder.idealState(_resourceName)).getRecord().getListFields() .size(); ClusterDataCache cache = new ClusterDataCache(); cache.refresh(accessor); diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestBatchMessage.java b/helix-core/src/test/java/org/apache/helix/integration/TestBatchMessage.java index bf2de1e3f3..5c2c68d52e 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestBatchMessage.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestBatchMessage.java @@ -83,9 +83,9 @@ public void testBasic() throws Exception { ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor(_gZkClient)); Builder keyBuilder = accessor.keyBuilder(); - IdealState idealState = accessor.getProperty(keyBuilder.idealStates("TestDB0")); + IdealState idealState = accessor.getProperty(keyBuilder.idealState("TestDB0")); idealState.setBatchMessageMode(true); - accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState); + accessor.setProperty(keyBuilder.idealState("TestDB0"), idealState); // register a message listener so we know how many message generated TestZkChildListener listener = new TestZkChildListener(); @@ -168,9 +168,9 @@ public void testChangeBatchMessageMode() throws Exception { ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor(_gZkClient)); Builder keyBuilder = accessor.keyBuilder(); - IdealState idealState = accessor.getProperty(keyBuilder.idealStates("TestDB0")); + IdealState idealState = accessor.getProperty(keyBuilder.idealState("TestDB0")); idealState.setBatchMessageMode(true); - accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState); + accessor.setProperty(keyBuilder.idealState("TestDB0"), idealState); // registry a message listener so we know how many message generated TestZkChildListener listener = new TestZkChildListener(); @@ -224,9 +224,9 @@ public void testSubMsgExecutionFail() throws Exception { 
ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor(_gZkClient)); Builder keyBuilder = accessor.keyBuilder(); - IdealState idealState = accessor.getProperty(keyBuilder.idealStates("TestDB0")); + IdealState idealState = accessor.getProperty(keyBuilder.idealState("TestDB0")); idealState.setBatchMessageMode(true); - accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState); + accessor.setProperty(keyBuilder.idealState("TestDB0"), idealState); TestHelper .startController(clusterName, "controller_0", ZK_ADDR, HelixControllerMain.STANDALONE); diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestBatchMessageWrapper.java b/helix-core/src/test/java/org/apache/helix/integration/TestBatchMessageWrapper.java index 2ae8bf3ef4..6640d822d0 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestBatchMessageWrapper.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestBatchMessageWrapper.java @@ -86,9 +86,9 @@ public void testBasic() throws Exception { ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor(_gZkClient)); Builder keyBuilder = accessor.keyBuilder(); - IdealState idealState = accessor.getProperty(keyBuilder.idealStates("TestDB0")); + IdealState idealState = accessor.getProperty(keyBuilder.idealState("TestDB0")); idealState.setBatchMessageMode(true); - accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState); + accessor.setProperty(keyBuilder.idealState("TestDB0"), idealState); ClusterController controller = new ClusterController(clusterName, "controller_0", ZK_ADDR); controller.syncStart(); diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestBucketizedResource.java b/helix-core/src/test/java/org/apache/helix/integration/TestBucketizedResource.java index 8e75537f52..155af871bb 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestBucketizedResource.java +++ 
b/helix-core/src/test/java/org/apache/helix/integration/TestBucketizedResource.java @@ -62,9 +62,9 @@ public void testBucketizedResource() throws Exception { // String idealStatePath = PropertyPathConfig.getPath(PropertyType.IDEALSTATES, clusterName, // "TestDB0"); Builder keyBuilder = accessor.keyBuilder(); - IdealState idealState = accessor.getProperty(keyBuilder.idealStates("TestDB0")); + IdealState idealState = accessor.getProperty(keyBuilder.idealState("TestDB0")); idealState.setBucketSize(1); - accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState); + accessor.setProperty(keyBuilder.idealState("TestDB0"), idealState); TestHelper .startController(clusterName, "controller_0", ZK_ADDR, HelixControllerMain.STANDALONE); diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestCustomizedIdealStateRebalancer.java b/helix-core/src/test/java/org/apache/helix/integration/TestCustomizedIdealStateRebalancer.java index aba14a7db6..86ada96269 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestCustomizedIdealStateRebalancer.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestCustomizedIdealStateRebalancer.java @@ -112,7 +112,7 @@ public void testCustomizedIdealStateRebalancer() throws InterruptedException { for (String partition : ev.getPartitionStringSet()) { Assert.assertEquals(ev.getStateMap(partition).size(), 1); } - IdealState is = accessor.getProperty(keyBuilder.idealStates(db2)); + IdealState is = accessor.getProperty(keyBuilder.idealState(db2)); for (String partition : is.getPartitionStringSet()) { Assert.assertEquals(is.getPreferenceList(partition).size(), 0); Assert.assertEquals(is.getInstanceStateMap(partition).size(), 0); @@ -139,7 +139,7 @@ public boolean verify() { new ZKHelixDataAccessor(_clusterName, new ZkBaseDataAccessor(_client)); Builder keyBuilder = accessor.keyBuilder(); int numberOfPartitions = - accessor.getProperty(keyBuilder.idealStates(_resourceName)).getRecord().getListFields() + 
accessor.getProperty(keyBuilder.idealState(_resourceName)).getRecord().getListFields() .size(); ClusterDataCache cache = new ClusterDataCache(); cache.refresh(accessor); diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestDisable.java b/helix-core/src/test/java/org/apache/helix/integration/TestDisable.java index 3341e6b225..c63e03af38 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestDisable.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestDisable.java @@ -67,9 +67,9 @@ public void testDisableNodeCustomIS() throws Exception { ZkBaseDataAccessor baseAccessor = new ZkBaseDataAccessor(_gZkClient); ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, baseAccessor); Builder keyBuilder = accessor.keyBuilder(); - IdealState idealState = accessor.getProperty(keyBuilder.idealStates("TestDB0")); + IdealState idealState = accessor.getProperty(keyBuilder.idealState("TestDB0")); idealState.setRebalanceMode(RebalanceMode.CUSTOMIZED); - accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState); + accessor.setProperty(keyBuilder.idealState("TestDB0"), idealState); // start controller ClusterController controller = new ClusterController(clusterName, "controller_0", ZK_ADDR); @@ -234,9 +234,9 @@ public void testDisablePartitionCustomIS() throws Exception { ZkBaseDataAccessor baseAccessor = new ZkBaseDataAccessor(_gZkClient); ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, baseAccessor); Builder keyBuilder = accessor.keyBuilder(); - IdealState idealState = accessor.getProperty(keyBuilder.idealStates("TestDB0")); + IdealState idealState = accessor.getProperty(keyBuilder.idealState("TestDB0")); idealState.setRebalanceMode(RebalanceMode.CUSTOMIZED); - accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState); + accessor.setProperty(keyBuilder.idealState("TestDB0"), idealState); // start controller ClusterController controller = new ClusterController(clusterName, 
"controller_0", ZK_ADDR); diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestDrop.java b/helix-core/src/test/java/org/apache/helix/integration/TestDrop.java index 84e70efdc2..faf8504563 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestDrop.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestDrop.java @@ -243,7 +243,7 @@ public void testDropErrorPartitionCustomIS() throws Exception { HelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor(_gZkClient)); Builder keyBuiler = accessor.keyBuilder(); - accessor.setProperty(keyBuiler.idealStates("TestDB0"), isBuilder.build()); + accessor.setProperty(keyBuiler.idealState("TestDB0"), isBuilder.build()); // start controller ClusterController controller = new ClusterController(clusterName, "controller_0", ZK_ADDR); diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestRenamePartition.java b/helix-core/src/test/java/org/apache/helix/integration/TestRenamePartition.java index c3133cc79f..b7f8d614d6 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestRenamePartition.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestRenamePartition.java @@ -60,11 +60,11 @@ public void testRenamePartitionAutoIS() throws Exception { new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor(_gZkClient)); Builder keyBuilder = accessor.keyBuilder(); - IdealState idealState = accessor.getProperty(keyBuilder.idealStates("TestDB0")); + IdealState idealState = accessor.getProperty(keyBuilder.idealState("TestDB0")); List prioList = idealState.getRecord().getListFields().remove("TestDB0_0"); idealState.getRecord().getListFields().put("TestDB0_100", prioList); - accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState); + accessor.setProperty(keyBuilder.idealState("TestDB0"), idealState); boolean result = ClusterStateVerifier.verifyByPolling(new ClusterStateVerifier.BestPossAndExtViewZkVerifier( @@ 
-106,13 +106,13 @@ public void testRenamePartitionCustomIS() throws Exception { new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor(_gZkClient)); Builder keyBuilder = accessor.keyBuilder(); - accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState); + accessor.setProperty(keyBuilder.idealState("TestDB0"), idealState); startAndVerify(clusterName); Map stateMap = idealState.getRecord().getMapFields().remove("TestDB0_0"); idealState.getRecord().getMapFields().put("TestDB0_100", stateMap); - accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState); + accessor.setProperty(keyBuilder.idealState("TestDB0"), idealState); boolean result = ClusterStateVerifier.verifyByPolling(new ClusterStateVerifier.BestPossAndExtViewZkVerifier( diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestSchemataSM.java b/helix-core/src/test/java/org/apache/helix/integration/TestSchemataSM.java index 3024f45757..0db33ddb1d 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestSchemataSM.java +++ b/helix-core/src/test/java/org/apache/helix/integration/TestSchemataSM.java @@ -63,7 +63,7 @@ public void testSchemataSM() throws Exception { ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor(_gZkClient)); PropertyKey.Builder keyBuilder = accessor.keyBuilder(); - PropertyKey key = keyBuilder.idealStates("TestSchemata0"); + PropertyKey key = keyBuilder.idealState("TestSchemata0"); IdealState idealState = accessor.getProperty(key); idealState.setReplicas(HelixConstants.StateModelToken.ANY_LIVEINSTANCE.toString()); idealState.getRecord().setListField("TestSchemata0_0", diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestSwapInstance.java b/helix-core/src/test/java/org/apache/helix/integration/TestSwapInstance.java index a1f63aa9b3..c60c1c14ee 100644 --- a/helix-core/src/test/java/org/apache/helix/integration/TestSwapInstance.java +++ 
b/helix-core/src/test/java/org/apache/helix/integration/TestSwapInstance.java @@ -42,10 +42,10 @@ public void TestSwap() throws Exception { ZNRecord idealStateOld1 = new ZNRecord("TestDB"); ZNRecord idealStateOld2 = new ZNRecord("MyDB"); - IdealState is1 = helixAccessor.getProperty(helixAccessor.keyBuilder().idealStates("TestDB")); + IdealState is1 = helixAccessor.getProperty(helixAccessor.keyBuilder().idealState("TestDB")); idealStateOld1.merge(is1.getRecord()); - IdealState is2 = helixAccessor.getProperty(helixAccessor.keyBuilder().idealStates("MyDB")); + IdealState is2 = helixAccessor.getProperty(helixAccessor.keyBuilder().idealState("MyDB")); idealStateOld2.merge(is2.getRecord()); String instanceName = PARTICIPANT_PREFIX + "_" + (START_PORT + 0); @@ -88,9 +88,9 @@ public void TestSwap() throws Exception { ZK_ADDR, CLUSTER_NAME)); Assert.assertTrue(result); - is1 = helixAccessor.getProperty(helixAccessor.keyBuilder().idealStates("TestDB")); + is1 = helixAccessor.getProperty(helixAccessor.keyBuilder().idealState("TestDB")); - is2 = helixAccessor.getProperty(helixAccessor.keyBuilder().idealStates("MyDB")); + is2 = helixAccessor.getProperty(helixAccessor.keyBuilder().idealState("MyDB")); for (String key : idealStateOld1.getMapFields().keySet()) { for (String host : idealStateOld1.getMapField(key).keySet()) { diff --git a/helix-core/src/test/java/org/apache/helix/manager/zk/TestZNRecordSizeLimit.java b/helix-core/src/test/java/org/apache/helix/manager/zk/TestZNRecordSizeLimit.java index 286593a4bd..5b5a8e5933 100644 --- a/helix-core/src/test/java/org/apache/helix/manager/zk/TestZNRecordSizeLimit.java +++ b/helix-core/src/test/java/org/apache/helix/manager/zk/TestZNRecordSizeLimit.java @@ -125,7 +125,7 @@ record = zkClient.readData(path1); for (int i = 0; i < 1024; i++) { idealState.getRecord().setSimpleField(i + "", bufStr); } - boolean succeed = accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState); + boolean succeed = 
accessor.setProperty(keyBuilder.idealState("TestDB0"), idealState); Assert.assertFalse(succeed); HelixProperty property = accessor.getProperty(keyBuilder.stateTransitionStatus("localhost_12918", "session_1", @@ -141,9 +141,9 @@ record = zkClient.readData(path1); for (int i = 0; i < 900; i++) { idealState.getRecord().setSimpleField(i + "", bufStr); } - succeed = accessor.setProperty(keyBuilder.idealStates("TestDB1"), idealState); + succeed = accessor.setProperty(keyBuilder.idealState("TestDB1"), idealState); Assert.assertTrue(succeed); - record = accessor.getProperty(keyBuilder.idealStates("TestDB1")).getRecord(); + record = accessor.getProperty(keyBuilder.idealState("TestDB1")).getRecord(); Assert.assertTrue(serializer.serialize(record).length > 900 * 1024); // oversized data should not update existing data on zk @@ -155,9 +155,9 @@ record = accessor.getProperty(keyBuilder.idealStates("TestDB1")).getRecord(); idealState.getRecord().setSimpleField(i + "", bufStr); } // System.out.println("record: " + idealState.getRecord()); - succeed = accessor.updateProperty(keyBuilder.idealStates("TestDB1"), idealState); + succeed = accessor.updateProperty(keyBuilder.idealState("TestDB1"), idealState); Assert.assertFalse(succeed); - recordNew = accessor.getProperty(keyBuilder.idealStates("TestDB1")).getRecord(); + recordNew = accessor.getProperty(keyBuilder.idealState("TestDB1")).getRecord(); arr = serializer.serialize(record); arrNew = serializer.serialize(recordNew); Assert.assertTrue(Arrays.equals(arr, arrNew)); @@ -249,9 +249,9 @@ record = zkClient.readData(path1); for (int i = 0; i < 1024; i++) { idealState.getRecord().setSimpleField(i + "", bufStr); } - boolean succeed = accessor.setProperty(keyBuilder.idealStates("TestDB_1"), idealState); + boolean succeed = accessor.setProperty(keyBuilder.idealState("TestDB_1"), idealState); Assert.assertFalse(succeed); - HelixProperty property = accessor.getProperty(keyBuilder.idealStates("TestDB_1")); + HelixProperty property = 
accessor.getProperty(keyBuilder.idealState("TestDB_1")); Assert.assertNull(property); // legal sized data gets written to zk @@ -263,9 +263,9 @@ record = zkClient.readData(path1); for (int i = 0; i < 900; i++) { idealState.getRecord().setSimpleField(i + "", bufStr); } - succeed = accessor.setProperty(keyBuilder.idealStates("TestDB_2"), idealState); + succeed = accessor.setProperty(keyBuilder.idealState("TestDB_2"), idealState); Assert.assertTrue(succeed); - record = accessor.getProperty(keyBuilder.idealStates("TestDB_2")).getRecord(); + record = accessor.getProperty(keyBuilder.idealState("TestDB_2")).getRecord(); Assert.assertTrue(serializer.serialize(record).length > 900 * 1024); // oversized data should not update existing data on zk @@ -278,9 +278,9 @@ record = accessor.getProperty(keyBuilder.idealStates("TestDB_2")).getRecord(); idealState.getRecord().setSimpleField(i + "", bufStr); } // System.out.println("record: " + idealState.getRecord()); - succeed = accessor.updateProperty(keyBuilder.idealStates("TestDB_2"), idealState); + succeed = accessor.updateProperty(keyBuilder.idealState("TestDB_2"), idealState); Assert.assertFalse(succeed); - recordNew = accessor.getProperty(keyBuilder.idealStates("TestDB_2")).getRecord(); + recordNew = accessor.getProperty(keyBuilder.idealState("TestDB_2")).getRecord(); arr = serializer.serialize(record); arrNew = serializer.serialize(recordNew); Assert.assertTrue(Arrays.equals(arr, arrNew)); diff --git a/helix-core/src/test/java/org/apache/helix/manager/zk/TestZkHelixAdmin.java b/helix-core/src/test/java/org/apache/helix/manager/zk/TestZkHelixAdmin.java index 4b3764fb51..874e337c01 100644 --- a/helix-core/src/test/java/org/apache/helix/manager/zk/TestZkHelixAdmin.java +++ b/helix-core/src/test/java/org/apache/helix/manager/zk/TestZkHelixAdmin.java @@ -212,13 +212,13 @@ public void testDropResource() { .forCluster(clusterName).forResource("test-db").build(), resourceConfig); PropertyKey.Builder keyBuilder = new 
PropertyKey.Builder(clusterName); - Assert.assertTrue(_gZkClient.exists(keyBuilder.idealStates("test-db").getPath()), + Assert.assertTrue(_gZkClient.exists(keyBuilder.idealState("test-db").getPath()), "test-db ideal-state should exist"); Assert.assertTrue(_gZkClient.exists(keyBuilder.resourceConfig("test-db").getPath()), "test-db resource config should exist"); tool.dropResource(clusterName, "test-db"); - Assert.assertFalse(_gZkClient.exists(keyBuilder.idealStates("test-db").getPath()), + Assert.assertFalse(_gZkClient.exists(keyBuilder.idealState("test-db").getPath()), "test-db ideal-state should be dropped"); Assert.assertFalse(_gZkClient.exists(keyBuilder.resourceConfig("test-db").getPath()), "test-db resource config should be dropped"); diff --git a/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestResourceMonitor.java b/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestResourceMonitor.java index 6712c40019..f5e41de744 100644 --- a/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestResourceMonitor.java +++ b/helix-core/src/test/java/org/apache/helix/monitoring/mbeans/TestResourceMonitor.java @@ -114,7 +114,7 @@ public void TestReportData() { HelixDataAccessor helixDataAccessor = manager.getHelixDataAccessor(); Builder keyBuilder = helixDataAccessor.keyBuilder(); ExternalView externalView = helixDataAccessor.getProperty(keyBuilder.externalView(_dbName)); - IdealState idealState = helixDataAccessor.getProperty(keyBuilder.idealStates(_dbName)); + IdealState idealState = helixDataAccessor.getProperty(keyBuilder.idealState(_dbName)); monitor.updateExternalView(externalView, idealState); diff --git a/helix-core/src/test/java/org/apache/helix/tools/TestHelixAdminCli.java b/helix-core/src/test/java/org/apache/helix/tools/TestHelixAdminCli.java index 1f28f4b37f..6935de46b3 100644 --- a/helix-core/src/test/java/org/apache/helix/tools/TestHelixAdminCli.java +++ b/helix-core/src/test/java/org/apache/helix/tools/TestHelixAdminCli.java 
@@ -342,7 +342,7 @@ public void testDropAddResource() throws Exception { // save ideal state BaseDataAccessor baseAccessor = new ZkBaseDataAccessor(_gZkClient); HelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, baseAccessor); - IdealState idealState = accessor.getProperty(accessor.keyBuilder().idealStates("db_11")); + IdealState idealState = accessor.getProperty(accessor.keyBuilder().idealState("db_11")); ZNRecordJsonSerializer serializer = new ZNRecordJsonSerializer(); String tmpDir = System.getProperty("java.io.tmpdir"); @@ -371,7 +371,7 @@ public void testDropAddResource() throws Exception { clusterName)); Assert.assertTrue(verifyResult); - IdealState idealState2 = accessor.getProperty(accessor.keyBuilder().idealStates("db_11")); + IdealState idealState2 = accessor.getProperty(accessor.keyBuilder().idealState("db_11")); Assert.assertTrue(idealState2.getRecord().equals(idealState.getRecord())); // clean up @@ -650,7 +650,7 @@ public void testInstanceGroupTags() throws Exception { command = "-zkSvr localhost:2183 -rebalance " + clusterName + " db_11 2 -instanceGroupTag tag1"; ClusterSetup.processCommandLineArgs(command.split("\\s+")); - IdealState dbIs = accessor.getProperty(accessor.keyBuilder().idealStates("db_11")); + IdealState dbIs = accessor.getProperty(accessor.keyBuilder().idealState("db_11")); Set hosts = new HashSet(); for (String p : dbIs.getPartitionStringSet()) { for (String hostName : dbIs.getInstanceStateMap(p).keySet()) { @@ -672,7 +672,7 @@ public void testInstanceGroupTags() throws Exception { command = "-zkSvr localhost:2183 -rebalance " + clusterName + " db_11 3 -instanceGroupTag tag2"; ClusterSetup.processCommandLineArgs(command.split("\\s+")); - dbIs = accessor.getProperty(accessor.keyBuilder().idealStates("db_11")); + dbIs = accessor.getProperty(accessor.keyBuilder().idealState("db_11")); hosts = new HashSet(); for (String p : dbIs.getPartitionStringSet()) { for (String hostName : dbIs.getInstanceStateMap(p).keySet()) { @@ 
-700,7 +700,7 @@ public void testInstanceGroupTags() throws Exception { command = "-zkSvr localhost:2183 -rebalance " + clusterName + " db_11 3 -instanceGroupTag tag2"; ClusterSetup.processCommandLineArgs(command.split("\\s+")); - dbIs = accessor.getProperty(accessor.keyBuilder().idealStates("db_11")); + dbIs = accessor.getProperty(accessor.keyBuilder().idealState("db_11")); hosts = new HashSet(); for (String p : dbIs.getPartitionStringSet()) { for (String hostName : dbIs.getInstanceStateMap(p).keySet()) { From c57426501773f4dd36080b934a4e75fd50bc54d3 Mon Sep 17 00:00:00 2001 From: zzhang Date: Mon, 2 Sep 2013 22:08:34 -0700 Subject: [PATCH 010/113] [HELIX-215] YAML-based configuration, new recipe that uses YAML and USER_DEFINED rebalancer, rb=13930 --- src/site/markdown/Architecture.md | 91 +++--- src/site/markdown/Concepts.md | 26 +- src/site/markdown/Quickstart.md | 10 +- src/site/markdown/Tutorial.md | 22 +- src/site/markdown/index.md | 25 +- src/site/markdown/recipes/lock_manager.md | 2 +- .../recipes/rabbitmq_consumer_group.md | 4 +- .../markdown/recipes/user_def_rebalancer.md | 287 ++++++++++++++++++ src/site/markdown/tutorial_controller.md | 2 +- src/site/markdown/tutorial_messaging.md | 4 +- src/site/markdown/tutorial_participant.md | 5 +- src/site/markdown/tutorial_rebalance.md | 51 ++-- src/site/markdown/tutorial_spectator.md | 4 +- .../markdown/tutorial_user_def_rebalancer.md | 196 ++++++++++++ src/site/markdown/tutorial_yaml.md | 98 ++++++ src/site/site.xml | 1 + 16 files changed, 712 insertions(+), 116 deletions(-) create mode 100644 src/site/markdown/recipes/user_def_rebalancer.md create mode 100644 src/site/markdown/tutorial_user_def_rebalancer.md create mode 100644 src/site/markdown/tutorial_yaml.md diff --git a/src/site/markdown/Architecture.md b/src/site/markdown/Architecture.md index 7acf590fbf..ac96443703 100644 --- a/src/site/markdown/Architecture.md +++ b/src/site/markdown/Architecture.md @@ -29,16 +29,16 @@ Helix aims to provide the 
following abilities to a distributed system: * Monitor cluster health and provide alerts on SLA violation. * Service discovery mechanism to route requests. -To build such a system, we need a mechanism to co-ordinate between different nodes/components in the system. This mechanism can be achieved with a software that reacts to any change in the cluster and comes up with a set of tasks needed to bring the cluster to a stable state. The set of tasks will be assigned to one or more nodes in the cluster. Helix serves this purpose of managing the various components in the cluster. +To build such a system, we need a mechanism to co-ordinate between different nodes and other components in the system. This mechanism can be achieved with software that reacts to any change in the cluster and comes up with a set of tasks needed to bring the cluster to a stable state. The set of tasks will be assigned to one or more nodes in the cluster. Helix serves this purpose of managing the various components in the cluster. ![Helix Design](images/system.png) Distributed System Components -In general any distributed system cluster will have the following +In general any distributed system cluster will have the following components and properties: -* Set of nodes also referred to as an instance. -* Set of resources which can be a database, lucene index or a task. +* A set of nodes also referred to as instances. +* A set of resources which can be databases, lucene indexes or tasks. * Each resource is also partitioned into one or more Partitions. * Each partition may have one or more copies called replicas. * Each replica can have a state associated with it. For example Master, Slave, Leader, Standby, Online, Offline etc @@ -48,47 +48,46 @@ Roles ![Helix Design](images/HELIX-components.png) -Not all nodes in a distributed system will perform similar functionality. 
For e.g, a few nodes might be serving requests, few nodes might be sending the request and some nodes might be controlling the nodes in the cluster. Based on functionality we have grouped them into +Not all nodes in a distributed system will perform similar functionalities. For example, a few nodes might be serving requests and a few nodes might be sending requests, and some nodes might be controlling the nodes in the cluster. Thus, Helix categorizes nodes by their specific roles in the system. -We have divided Helix in 3 logical components based on their responsibility - -1. PARTICIPANT: The nodes that actually host the distributed resources. -2. SPECTATOR: The nodes that simply observe the PARTICIPANT State and route the request accordingly. Routers, for example, need to know the Instance on which a partition is hosted and its state in order to route the request to the appropriate end point. -3. CONTROLLER: The controller observes and controls the PARTICIPANT nodes. It is responsible for coordinating all transitions in the cluster and ensuring that state constraints are satisfied and cluster stability is maintained. +We have divided Helix nodes into 3 logical components based on their responsibilities: +1. Participant: The nodes that actually host the distributed resources. +2. Spectator: The nodes that simply observe the Participant state and route the request accordingly. Routers, for example, need to know the instance on which a partition is hosted and its state in order to route the request to the appropriate end point. +3. Controller: The controller observes and controls the Participant nodes. It is responsible for coordinating all transitions in the cluster and ensuring that state constraints are satisfied and cluster stability is maintained. These are simply logical components and can be deployed as per the system requirements. For example: -1. Controller can be deployed as a separate service -2. 
Controller can be deployed along with a Participant but only one Controller will be active at any given time. +1. The controller can be deployed as a separate service +2. The controller can be deployed along with a Participant but only one Controller will be active at any given time. Both have pros and cons, which will be discussed later and one can chose the mode of deployment as per system needs. -## Cluster state/metadata store +## Cluster state metadata store We need a distributed store to maintain the state of the cluster and a notification system to notify if there is any change in the cluster state. Helix uses Zookeeper to achieve this functionality. Zookeeper provides: * A way to represent PERSISTENT state which basically remains until its deleted. -* A way to represent TRANSIENT/EPHEMERAL state which vanishes when the process that created the STATE dies. -* Notification mechanism when there is a change in PERSISTENT/EPHEMERAL STATE +* A way to represent TRANSIENT/EPHEMERAL state which vanishes when the process that created the state dies. +* Notification mechanism when there is a change in PERSISTENT and EPHEMERAL state -The namespace provided by ZooKeeper is much like that of a standard file system. A name is a sequence of path elements separated by a slash (/). Every node[ZNODE] in ZooKeeper\'s namespace is identified by a path. +The namespace provided by ZooKeeper is much like that of a standard file system. A name is a sequence of path elements separated by a slash (/). Every node[ZNode] in ZooKeeper\'s namespace is identified by a path. 
-More info on Zookeeper can be found here http://zookeeper.apache.org +More info on Zookeeper can be found at http://zookeeper.apache.org -## Statemachine and constraints +## State machine and constraints -Even though the concept of Resource, Partition, Replicas is common to most distributed systems, one thing that differentiates one distributed system from another is the way each partition is assigned a state and the constraints on each state. +Even though the concepts of Resources, Partitions, and Replicas are common to most distributed systems, one thing that differentiates one distributed system from another is the way each partition is assigned a state and the constraints on each state. For example: -1. If a system is serving READ ONLY data then all partition\'s replicas are equal and they can either be ONLINE or OFFLINE. -2. If a system takes BOTH READ and WRITES but ensure that WRITES go through only one partition then the states will be MASTER, SLAVE and OFFLINE. Writes go through the MASTER and is replicated to the SLAVES. Optionally, READS can go through SLAVES. +1. If a system is serving read-only data then all partition\'s replicas are equal and they can either be ONLINE or OFFLINE. +2. If a system takes _both_ reads and writes but ensure that writes go through only one partition, the states will be MASTER, SLAVE, and OFFLINE. Writes go through the MASTER and replicate to the SLAVEs. Optionally, reads can go through SLAVES. -Apart from defining STATE for each partition, the transition path to each STATE can be application specific. For example, in order to become MASTER it might be a requirement to first become a SLAVE. This ensures that if the SLAVE does not have the data as part of OFFLINE-SLAVE transition it can bootstrap data from other nodes in the system. +Apart from defining state for each partition, the transition path to each state can be application specific. 
For example, in order to become MASTER it might be a requirement to first become a SLAVE. This ensures that if the SLAVE does not have the data as part of OFFLINE-SLAVE transition it can bootstrap data from other nodes in the system. Helix provides a way to configure an application specific state machine along with constraints on each state. Along with constraints on STATE, Helix also provides a way to specify constraints on transitions. (More on this later.) @@ -113,17 +112,17 @@ MASTER | SLAVE | SLAVE | N/A | The following terminologies are used in Helix to model a state machine. -* IDEALSTATE: The state in which we need the cluster to be in if all nodes are up and running. In other words, all state constraints are satisfied. -* CURRENTSTATE: Represents the current state of each node in the cluster -* EXTERNALVIEW: Represents the combined view of CURRENTSTATE of all nodes. +* IdealState: The state in which we need the cluster to be in if all nodes are up and running. In other words, all state constraints are satisfied. +* CurrentState: Represents the actual current state of each node in the cluster +* ExternalView: Represents the combined view of CurrentState of all nodes. -The goal of Helix is always to make the CURRENTSTATE of the system same as the IDEALSTATE. Some scenarios where this may not be true are: +The goal of Helix is always to make the CurrentState of the system same as the IdealState. Some scenarios where this may not be true are: * When all nodes are down * When one or more nodes fail * New nodes are added and the partitions need to be reassigned -### IDEALSTATE +### IdealState Helix lets the application define the IdealState on a resource basis which basically consists of: @@ -140,11 +139,11 @@ Example: * ..... * Partition-p, replica-3, Slave, Node-n -Helix comes with various algorithms to automatically assign the partitions to nodes. 
The default algorithm minimizes the number of shuffles that happen when new nodes are added to the system +Helix comes with various algorithms to automatically assign the partitions to nodes. The default algorithm minimizes the number of shuffles that happen when new nodes are added to the system. -### CURRENTSTATE +### CurrentState -Every instance in the cluster hosts one or more partitions of a resource. Each of the partitions has a State associated with it. +Every instance in the cluster hosts one or more partitions of a resource. Each of the partitions has a state associated with it. Example Node-1 @@ -154,9 +153,9 @@ Example Node-1 * .... * Partition-p, Slave -### EXTERNALVIEW +### ExternalView -External clients needs to know the state of each partition in the cluster and the Node hosting that partition. Helix provides one view of the system to SPECTATORS as EXTERNAL VIEW. EXTERNAL VIEW is simply an aggregate of all CURRENTSTATE +External clients needs to know the state of each partition in the cluster and the Node hosting that partition. Helix provides one view of the system to Spectators as _ExternalView_. ExternalView is simply an aggregate of all node CurrentStates. * Partition-1, replica-1, Master, Node-1 * Partition-1, replica-2, Slave, Node-2 @@ -171,28 +170,28 @@ Mode of operation in a cluster A node process can be one of the following: -* PARTICIPANT: The process registers itself in the cluster and acts on the messages received in its queue and updates the current state. Example: Storage Node -* SPECTATOR: The process is simply interested in the changes in the Externalview. The Router is a spectator of the Storage cluster. -* CONTROLLER: This process actively controls the cluster by reacting to changes in Cluster State and sending messages to PARTICIPANTS. +* Participant: The process registers itself in the cluster and acts on the messages received in its queue and updates the current state. 
Example: a storage node in a distributed database +* Spectator: The process is simply interested in the changes in the Externalview. +* Controller: This process actively controls the cluster by reacting to changes in cluster state and sending messages to Participants. ### Participant Node Process -* When Node starts up, it registers itself under LIVEINSTANCES -* After registering, it waits for new Messages in the message queue +* When Node starts up, it registers itself under _LiveInstances_ +* After registering, it waits for new _Messages_ in the message queue * When it receives a message, it will perform the required task as indicated in the message -* After the task is completed, depending on the task outcome it updates the CURRENTSTATE +* After the task is completed, depending on the task outcome it updates the CurrentState ### Controller Process -* Watches IDEALSTATE -* Node goes down/comes up or Node is added/removed. Watches LIVEINSTANCES and CURRENTSTATE of each Node in the cluster -* Triggers appropriate state transition by sending message to PARTICIPANT +* Watches IdealState +* Notified when a node goes down/comes up or node is added/removed. Watches LiveInstances and CurrentState of each node in the cluster +* Triggers appropriate state transitions by sending message to Participants ### Spectator Process -* When the process starts, it asks cluster manager agent to be notified of changes in ExternalView -* Whenever it receives a notification, it reads the Externalview and performs required duties. For the Router, it updates its routing table. +* When the process starts, it asks the Helix agent to be notified of changes in ExternalView +* Whenever it receives a notification, it reads the Externalview and performs required duties. 
#### Interaction between controller, participant and spectator @@ -212,11 +211,11 @@ The following picture shows how controllers, participants and spectators interac * If a task is dependent on another task being completed, do not add that task * After any task is completed by a Participant, Controllers gets notified of the change and the State Transition algorithm is re-run until the CurrentState is same as IdealState. -## Helix znode layout +## Helix ZNode layout Helix organizes znodes under clusterName in multiple levels. -The top level (under clusterName) znodes are all Helix defined and in upper case: +The top level (under the cluster name) ZNodes are all Helix-defined and in upper case: * PROPERTYSTORE: application property store * STATEMODELDEFES: state model definitions @@ -227,7 +226,7 @@ The top level (under clusterName) znodes are all Helix defined and in upper case * LIVEINSTANCES: live instances * CONTROLLER: cluster controller runtime information -Under INSTANCES, there are runtime znodes for each instance. An instance organizes znodes as follows: +Under INSTANCES, there are runtime ZNodes for each instance. An instance organizes ZNodes as follows: * CURRENTSTATES * sessionId diff --git a/src/site/markdown/Concepts.md b/src/site/markdown/Concepts.md index 02d74065e9..e6bcca0abd 100644 --- a/src/site/markdown/Concepts.md +++ b/src/site/markdown/Concepts.md @@ -48,7 +48,7 @@ Consider a simple case where you want to launch a task \'myTask\' on node \'N1\' ``` ### Partition -If this task get too big to fit on one box, you might want to divide it into subTasks. Each subTask is referred to as a _partition_ in Helix. Let\'s say you want to divide the task into 3 subTasks/partitions, the IdealState can be changed as shown below. +If this task get too big to fit on one box, you might want to divide it into subtasks. Each subtask is referred to as a _partition_ in Helix. 
Let\'s say you want to divide the task into 3 subtasks/partitions, the IdealState can be changed as shown below. \'myTask_0\', \'myTask_1\', \'myTask_2\' are logical names representing the partitions of myTask. Each tasks runs on N1, N2 and N3 respectively. @@ -74,7 +74,7 @@ If this task get too big to fit on one box, you might want to divide it into sub ### Replica -Partitioning allows one to split the data/task into multiple subparts. But let\'s say the request rate each partition increases. The common solution is to have multiple copies for each partition. Helix refers to the copy of a partition as a _replica_. Adding a replica also increases the availability of the system during failures. One can see this methodology employed often in Search systems. The index is divided into shards, and each shard has multiple copies. +Partitioning allows one to split the data/task into multiple subparts. But let\'s say the request rate for each partition increases. The common solution is to have multiple copies for each partition. Helix refers to the copy of a partition as a _replica_. Adding a replica also increases the availability of the system during failures. One can see this methodology employed often in search systems. The index is divided into shards, and each shard has multiple copies. Let\'s say you want to add one additional replica for each task. The IdealState can simply be changed as shown below. @@ -106,7 +106,7 @@ For increasing the availability of the system, it\'s better to place the replica ### State -Now let\'s take a slightly complicated scenario where a task represents a database. Unlike an index which is in general read-only, a database supports both reads and writes. Keeping the data consistent among the replicas is crucial in distributed data stores. One commonly applied technique is to assign one replica as MASTER and remaining replicas as SLAVE. All writes go to the MASTER and are then replicated to the SLAVE replicas. 
+Now let\'s take a slightly more complicated scenario where a task represents a database. Unlike an index which is in general read-only, a database supports both reads and writes. Keeping the data consistent among the replicas is crucial in distributed data stores. One commonly applied technique is to assign one replica as the MASTER and remaining replicas as SLAVEs. All writes go to the MASTER and are then replicated to the SLAVE replicas. Helix allows one to assign different states to each replica. Let\'s say you have two MySQL instances N1 and N2, where one will serve as MASTER and another as SLAVE. The IdealState can be changed to: @@ -130,14 +130,14 @@ Helix allows one to assign different states to each replica. Let\'s say you have ### State Machine and Transitions -IdealState allows one to exactly specify the desired state of the cluster. Given an IdealState, Helix takes up the responsibility of ensuring that the cluster reaches the IdealState. The Helix _controller_ reads the IdealState and then commands the Participant to take appropriate actions to move from one state to another until it matches the IdealState. These actions are referred to as _transitions_ in Helix. +IdealState allows one to exactly specify the desired state of the cluster. Given an IdealState, Helix takes up the responsibility of ensuring that the cluster reaches the IdealState. The Helix _controller_ reads the IdealState and then commands each Participant to take appropriate actions to move from one state to another until it matches the IdealState. These actions are referred to as _transitions_ in Helix. The next logical question is: how does the _controller_ compute the transitions required to get to IdealState? This is where the finite state machine concept comes in. Helix allows applications to plug in a finite state machine. 
A state machine consists of the following: * State: Describes the role of a replica -* Transition: An action that allows a replica to move from one State to another, thus changing its role. +* Transition: An action that allows a replica to move from one state to another, thus changing its role. -Here is an example of MASTERSLAVE state machine, +Here is an example of MasterSlave state machine: ``` OFFLINE | SLAVE | MASTER @@ -176,7 +176,7 @@ Helix allows each resource to be associated with one state machine. This means y ### Current State -CurrentState of a resource simply represents its actual state at a PARTICIPANT. In the below example: +CurrentState of a resource simply represents its actual state at a Participant. In the below example: * INSTANCE_NAME: Unique name representing the process * SESSION_ID: ID that is automatically assigned every time a process joins the cluster @@ -206,7 +206,7 @@ Each node in the cluster has its own CurrentState. ### External View -In order to communicate with the PARTICIPANTs, external clients need to know the current state of each of the PARTICIPANTs. The external clients are referred to as SPECTATORS. In order to make the life of SPECTATOR simple, Helix provides an EXTERNALVIEW that is an aggregated view of the current state across all nodes. The EXTERNALVIEW has a similar format as IDEALSTATE. +In order to communicate with the Participants, external clients need to know the current state of each of the Participants. The external clients are referred to as Spectators. In order to make the life of Spectator simple, Helix provides an ExternalView that is an aggregated view of the current state across all nodes. The ExternalView has a similar format as IdealState. ``` { @@ -233,27 +233,27 @@ In order to communicate with the PARTICIPANTs, external clients need to know the ### Rebalancer -The core component of Helix is the CONTROLLER which runs the REBALANCER algorithm on every cluster event. 
Cluster events can be one of the following: +The core component of Helix is the Controller which runs the Rebalancer algorithm on every cluster event. Cluster events can be one of the following: * Nodes start/stop and soft/hard failures * New nodes are added/removed * Ideal state changes -There are few more such as config changes, etc. The key takeaway: there are many ways to trigger the rebalancer. +There are few more examples such as configuration changes, etc. The key takeaway: there are many ways to trigger the rebalancer. When a rebalancer is run it simply does the following: * Compares the IdealState and current state * Computes the transitions required to reach the IdealState -* Issues the transitions to each PARTICIPANT +* Issues the transitions to each Participant -The above steps happen for every change in the system. Once the current state matches the IdealState, the system is considered stable which implies \'IDEALSTATE = CURRENTSTATE = EXTERNALVIEW\' +The above steps happen for every change in the system. Once the current state matches the IdealState, the system is considered stable which implies \'IdealState = CurrentState = ExternalView\' ### Dynamic IdealState One of the things that makes Helix powerful is that IdealState can be changed dynamically. This means one can listen to cluster events like node failures and dynamically change the ideal state. Helix will then take care of triggering the respective transitions in the system. -Helix comes with a few algorithms to automatically compute the IdealState based on the constraints. For example, if you have a resource of 3 partitions and 2 replicas, Helix can automatically compute the IdealState based on the nodes that are currently active. See the [tutorial](./tutorial_rebalance.html) to find out more about various execution modes of Helix like AUTO_REBALANCE, AUTO and CUSTOM. +Helix comes with a few algorithms to automatically compute the IdealState based on the constraints. 
For example, if you have a resource of 3 partitions and 2 replicas, Helix can automatically compute the IdealState based on the nodes that are currently active. See the [tutorial](./tutorial_rebalance.html) to find out more about various execution modes of Helix like FULL_AUTO, SEMI_AUTO and CUSTOMIZED. diff --git a/src/site/markdown/Quickstart.md b/src/site/markdown/Quickstart.md index dcffc1b908..574f98b4ed 100644 --- a/src/site/markdown/Quickstart.md +++ b/src/site/markdown/Quickstart.md @@ -138,7 +138,7 @@ Now you can run the same steps by hand. In the detailed version, we\'ll do the * Expand the cluster: add a few nodes and rebalance the partitions * Failover: stop a node and verify the mastership transfer -### Install/Start zookeeper +### Install and Start Zookeeper Zookeeper can be started in standalone mode or replicated mode. @@ -322,7 +322,7 @@ IdealState for myDB: "myDB_5" : [ "localhost_12914", "localhost_12915", "localhost_12913" ] }, "simpleFields" : { - "IDEAL_STATE_MODE" : "AUTO", + "REBALANCE_MODE" : "SEMI_AUTO", "NUM_PARTITIONS" : "6", "REPLICAS" : "3", "STATE_MODEL_DEF_REF" : "MasterSlave", @@ -450,7 +450,7 @@ IdealState for myDB: "myDB_5" : [ "localhost_12914", "localhost_12915", "localhost_12913" ] }, "simpleFields" : { - "IDEAL_STATE_MODE" : "AUTO", + "REBALANCE_MODE" : "SEMI_AUTO", "NUM_PARTITIONS" : "6", "REPLICAS" : "3", "STATE_MODEL_DEF_REF" : "MasterSlave", @@ -559,7 +559,7 @@ IdealState for myDB: "myDB_5" : [ "localhost_12914", "localhost_12915", "localhost_12913" ] }, "simpleFields" : { - "IDEAL_STATE_MODE" : "AUTO", + "REBALANCE_MODE" : "SEMI_AUTO", "NUM_PARTITIONS" : "6", "REPLICAS" : "3", "STATE_MODEL_DEF_REF" : "MasterSlave", @@ -608,7 +608,7 @@ ExternalView for myDB: As we\'ve seen in this Quickstart, Helix takes care of partitioning, load balancing, elasticity, failure detection and recovery. -##### ZOOINSPECTOR +##### ZooInspector You can view all of the underlying data by going direct to zookeeper. 
Use ZooInspector that comes with zookeeper to browse the data. This is a java applet (make sure you have X windows) diff --git a/src/site/markdown/Tutorial.md b/src/site/markdown/Tutorial.md index 27f9fd929e..8e025b2ed5 100644 --- a/src/site/markdown/Tutorial.md +++ b/src/site/markdown/Tutorial.md @@ -36,12 +36,14 @@ Convention: we first cover the _basic_ approach, which is the easiest to impleme 2. [Spectator](./tutorial_spectator.html) 3. [Controller](./tutorial_controller.html) 4. [Rebalancing Algorithms](./tutorial_rebalance.html) -5. [State Machines](./tutorial_state.html) -6. [Messaging](./tutorial_messaging.html) -7. [Customized health check](./tutorial_health.html) -8. [Throttling](./tutorial_throttling.html) -9. [Application Property Store](./tutorial_propstore.html) -10. [Admin Interface](./tutorial_admin.html) +5. [User-Defined Rebalancing](./tutorial_user_def_rebalancer.html) +6. [State Machines](./tutorial_state.html) +7. [Messaging](./tutorial_messaging.html) +8. [Customized health check](./tutorial_health.html) +9. [Throttling](./tutorial_throttling.html) +10. [Application Property Store](./tutorial_propstore.html) +11. [Admin Interface](./tutorial_admin.html) +12. [YAML Cluster Setup](./tutorial_yaml.html) ### Preliminaries @@ -180,9 +182,9 @@ Helix does this by assigning a STATE to a partition (such as MASTER, SLAVE), and There are 3 assignment modes Helix can operate on -* AUTO_REBALANCE: Helix decides the placement and state of a partition. -* AUTO: Application decides the placement but Helix decides the state of a partition. -* CUSTOM: Application controls the placement and state of a partition. +* FULL_AUTO: Helix decides the placement and state of a partition. +* SEMI_AUTO: Application decides the placement but Helix decides the state of a partition. +* CUSTOMIZED: Application controls the placement and state of a partition. For more info on the assignment modes, see [Rebalancing Algorithms](./tutorial_rebalance.html) of the tutorial. 
@@ -190,7 +192,7 @@ For more info on the assignment modes, see [Rebalancing Algorithms](./tutorial_r String RESOURCE_NAME = "MyDB"; int NUM_PARTITIONS = 6; STATE_MODEL_NAME = "MasterSlave"; - String MODE = "AUTO"; + String MODE = "SEMI_AUTO"; int NUM_REPLICAS = 2; admin.addResource(CLUSTER_NAME, RESOURCE_NAME, NUM_PARTITIONS, STATE_MODEL_NAME, MODE); diff --git a/src/site/markdown/index.md b/src/site/markdown/index.md index 57985d03dd..163f559f2f 100644 --- a/src/site/markdown/index.md +++ b/src/site/markdown/index.md @@ -46,6 +46,8 @@ Navigating the Documentation [Distributed Task DAG Execution](./recipes/task_dag_execution.html) +[User-Defined Rebalancer Example](./recipes/user_def_rebalancer.html) + What Is Helix -------------- @@ -54,45 +56,46 @@ Helix is a generic _cluster management_ framework used for the automatic managem What Is Cluster Management -------------------------- -To understand Helix, first you need to understand what is _cluster management_. A distributed system typically runs on multiple nodes for the following reasons: +To understand Helix, first you need to understand _cluster management_. A distributed system typically runs on multiple nodes for the following reasons: * scalability * fault tolerance * load balancing -Each node performs one or more of the primary function of the cluster, such as storing/serving data, producing/consuming data streams, etc. Once configured for your system, Helix acts as the global brain for the system. It is designed to make decisions that cannot be made in isolation. Examples of decisions that require global knowledge and coordination: +Each node performs one or more of the primary function of the cluster, such as storing and serving data, producing and consuming data streams, and so on. Once configured for your system, Helix acts as the global brain for the system. It is designed to make decisions that cannot be made in isolation. 
Examples of such decisions that require global knowledge and coordination: * scheduling of maintenance tasks, such as backups, garbage collection, file consolidation, index rebuilds * repartitioning of data or resources across the cluster * informing dependent systems of changes so they can react appropriately to cluster changes * throttling system tasks and changes -While it is possible to integrate these functions into the distributed system, it complicates the code. Helix has abstracted common cluster management tasks, enabling the system builder to model the desired behavior in a declarative state model, and let Helix manage the coordination. The result is less new code to write, and a robust, highly operable system. +While it is possible to integrate these functions into the distributed system, it complicates the code. Helix has abstracted common cluster management tasks, enabling the system builder to model the desired behavior with a declarative state model, and let Helix manage the coordination. The result is less new code to write, and a robust, highly operable system. Key Features of Helix --------------------- -1. Automatic assignment of resource/partition to nodes +1. Automatic assignment of resources and partitions to nodes 2. Node failure detection and recovery -3. Dynamic addition of Resources +3. Dynamic addition of resources 4. Dynamic addition of nodes to the cluster 5. Pluggable distributed state machine to manage the state of a resource via state transitions -6. Automatic load balancing and throttling of transitions +6. Automatic load balancing and throttling of transitions +7.
Optional pluggable rebalancing for user-defined assignment of resources and partitions Why Helix --------- -Modeling a distributed system as a state machine with constraints on state and transitions has the following benefits: +Modeling a distributed system as a state machine with constraints on states and transitions has the following benefits: -* Separates cluster management from the core functionality. -* Quick transformation from a single node system to an operable, distributed system. -* Simplicity: System components do not have to manage global cluster. This division of labor makes it easier to build, debug, and maintain your system. +* Separates cluster management from the core functionality of the system. +* Allows a quick transformation from a single node system to an operable, distributed system. +* Increases simplicity: system components do not have to manage a global cluster. This division of labor makes it easier to build, debug, and maintain your system. Build Instructions ------------------ -Requirements: Jdk 1.6+, Maven 2.0.8+ +Requirements: JDK 1.6+, Maven 2.0.8+ ``` git clone https://git-wip-us.apache.org/repos/asf/incubator-helix.git diff --git a/src/site/markdown/recipes/lock_manager.md b/src/site/markdown/recipes/lock_manager.md index 84420ddc84..252ace7eab 100644 --- a/src/site/markdown/recipes/lock_manager.md +++ b/src/site/markdown/recipes/lock_manager.md @@ -137,7 +137,7 @@ This provides more details on how to setup the cluster and where to plugin appli Create a lock group and specify the number of locks in the lock group. 
``` -./helix-admin --zkSvr localhost:2199 --addResource lock-manager-demo lock-group 6 OnlineOffline AUTO_REBALANCE +./helix-admin --zkSvr localhost:2199 --addResource lock-manager-demo lock-group 6 OnlineOffline FULL_AUTO ``` ##### Start the nodes diff --git a/src/site/markdown/recipes/rabbitmq_consumer_group.md b/src/site/markdown/recipes/rabbitmq_consumer_group.md index ec3053a555..9edc2cb242 100644 --- a/src/site/markdown/recipes/rabbitmq_consumer_group.md +++ b/src/site/markdown/recipes/rabbitmq_consumer_group.md @@ -148,7 +148,7 @@ Cluster setup ------------- This step creates znode on zookeeper for the cluster and adds the state model. We use online offline state model since there is no need for other states. The consumer is either processing a queue or it is not. -It creates a resource called "rabbitmq-consumer-group" with 6 partitions. The execution mode is set to AUTO_REBALANCE. This means that the Helix controls the assignment of partition to consumers and automatically distributes the partitions evenly among the active consumers. When a consumer is added or removed, it ensures that a minimum number of partitions are shuffled. +It creates a resource called "rabbitmq-consumer-group" with 6 partitions. The execution mode is set to FULL_AUTO. This means that the Helix controls the assignment of partition to consumers and automatically distributes the partitions evenly among the active consumers. When a consumer is added or removed, it ensures that a minimum number of partitions are shuffled. ``` zkclient = new ZkClient(zkAddr, ZkClient.DEFAULT_SESSION_TIMEOUT, @@ -165,7 +165,7 @@ It creates a resource called "rabbitmq-consumer-group" with 6 partitions. 
The ex // add resource "topic" which has 6 partitions String resourceName = "rabbitmq-consumer-group"; - admin.addResource(clusterName, resourceName, 6, "OnlineOffline", "AUTO_REBALANCE"); + admin.addResource(clusterName, resourceName, 6, "OnlineOffline", "FULL_AUTO"); ``` Starting the consumers diff --git a/src/site/markdown/recipes/user_def_rebalancer.md b/src/site/markdown/recipes/user_def_rebalancer.md new file mode 100644 index 0000000000..8beac0a46c --- /dev/null +++ b/src/site/markdown/recipes/user_def_rebalancer.md @@ -0,0 +1,287 @@ + +Lock Manager with a User-Defined Rebalancer +------------------------------------------- +Helix is able to compute node preferences and state assignments automatically using general-purpose algorithms. In many cases, a distributed system implementer may choose to instead define a customized approach to computing the location of replicas, the state mapping, or both in response to the addition or removal of participants. The following is an implementation of the [Distributed Lock Manager](./lock_manager.html) that includes a user-defined rebalancer. + +### Define the cluster and locks + +The YAML file below fully defines the cluster and the locks. A lock can be in one of two states: locked and unlocked. Transitions can happen in either direction, and the locked is preferred. A resource in this example is the entire collection of locks to distribute. A partition is mapped to a lock; in this case that means there are 12 locks. These 12 locks will be distributed across 3 nodes. The constraints indicate that only one replica of a lock can be in the locked state at any given time. These locks can each only have a single holder, defined by a replica count of 1. + +Notice the rebalancer section of the definition. The mode is set to USER_DEFINED and the class name refers to the plugged-in rebalancer implementation. 
This implementation is called whenever the state of the cluster changes, as is the case when participants are added or removed from the system. + +Location: incubator-helix/recipes/user-rebalanced-lock-manager/src/main/resources/lock-manager-config.yaml + +``` +clusterName: lock-manager-custom-rebalancer # unique name for the cluster +resources: + - name: lock-group # unique resource name + rebalancer: # we will provide our own rebalancer + mode: USER_DEFINED + class: org.apache.helix.userrebalancedlocks.LockManagerRebalancer + partitions: + count: 12 # number of locks + replicas: 1 # number of simultaneous holders for each lock + stateModel: + name: lock-unlock # unique model name + states: [LOCKED, RELEASED, DROPPED] # the list of possible states + transitions: # the list of possible transitions + - name: Unlock + from: LOCKED + to: RELEASED + - name: Lock + from: RELEASED + to: LOCKED + - name: DropLock + from: LOCKED + to: DROPPED + - name: DropUnlock + from: RELEASED + to: DROPPED + - name: Undrop + from: DROPPED + to: RELEASED + initialState: RELEASED + constraints: + state: + counts: # maximum number of replicas of a partition that can be in each state + - name: LOCKED + count: "1" + - name: RELEASED + count: "-1" + - name: DROPPED + count: "-1" + priorityList: [LOCKED, RELEASED, DROPPED] # states in order of priority + transition: # transitions priority to enforce order that transitions occur + priorityList: [Unlock, Lock, Undrop, DropUnlock, DropLock] +participants: # list of nodes that can acquire locks + - name: localhost_12001 + host: localhost + port: 12001 + - name: localhost_12002 + host: localhost + port: 12002 + - name: localhost_12003 + host: localhost + port: 12003 +``` + +Then, Helix\'s YAMLClusterSetup tool can read in the configuration and bootstrap the cluster immediately: + +``` +YAMLClusterSetup setup = new YAMLClusterSetup(zkAddress); +InputStream input = + Thread.currentThread().getContextClassLoader() + 
.getResourceAsStream("lock-manager-config.yaml"); +YAMLClusterSetup.YAMLClusterConfig config = setup.setupCluster(input); +``` + +### Write a rebalancer +Below is a full implementation of a rebalancer. In this case, it simply throws out the previous ideal state, computes the target node for as many partition replicas as can hold a lock in the LOCKED state (in this example, one), and assigns them the LOCKED state (which is at the head of the state preference list). Clearly a more robust implementation would likely examine the current ideal state to maintain current assignments, and the full state list to handle models more complicated than this one. However, for a simple lock holder implementation, this is sufficient. + +Location: incubator-helix/recipes/user-rebalanced-lock-manager/src/main/java/org/apache/helix/userdefinedrebalancer/LockManagerRebalancer.java + +``` +public class LockManagerRebalancer implements Rebalancer { + @Override + public void init(HelixManager manager) { + // do nothing; this rebalancer is independent of the manager + } + + @Override + public ResourceAssignment computeResourceMapping(Resource resource, IdealState currentIdealState, + CurrentStateOutput currentStateOutput, ClusterDataCache clusterData) { + // Initialize an empty mapping of locks to participants + ResourceAssignment assignment = new ResourceAssignment(resource.getResourceName()); + + // Get the list of live participants in the cluster + List liveParticipants = new ArrayList(clusterData.getLiveInstances().keySet()); + + // Get the state model (should be a simple lock/unlock model) and the highest-priority state + String stateModelName = currentIdealState.getStateModelDefRef(); + StateModelDefinition stateModelDef = clusterData.getStateModelDef(stateModelName); + if (stateModelDef.getStatesPriorityList().size() < 1) { + LOG.error("Invalid state model definition. 
There should be at least one state."); + return assignment; + } + String lockState = stateModelDef.getStatesPriorityList().get(0); + + // Count the number of participants allowed to lock each lock + String stateCount = stateModelDef.getNumInstancesPerState(lockState); + int lockHolders = 0; + try { + // a numeric value is a custom-specified number of participants allowed to lock the lock + lockHolders = Integer.parseInt(stateCount); + } catch (NumberFormatException e) { + LOG.error("Invalid state model definition. The lock state does not have a valid count"); + return assignment; + } + + // Fairly assign the lock state to the participants using a simple mod-based sequential + // assignment. For instance, if each lock can be held by 3 participants, lock 0 would be held + // by participants (0, 1, 2), lock 1 would be held by (1, 2, 3), and so on, wrapping around the + // number of participants as necessary. + // This assumes a simple lock-unlock model where the only state of interest is which nodes have + // acquired each lock. + int i = 0; + for (Partition partition : resource.getPartitions()) { + Map replicaMap = new HashMap(); + for (int j = i; j < i + lockHolders; j++) { + int participantIndex = j % liveParticipants.size(); + String participant = liveParticipants.get(participantIndex); + // enforce that a participant can only have one instance of a given lock + if (!replicaMap.containsKey(participant)) { + replicaMap.put(participant, lockState); + } + } + assignment.addReplicaMap(partition, replicaMap); + i++; + } + return assignment; + } +} +``` + +### Start up the participants +Here is a lock class based on the newly defined lock-unlock state model so that the participant can receive callbacks on state transitions. 
+ +Location: incubator-helix/recipes/user-rebalanced-lock-manager/src/main/java/org/apache/helix/userdefinedrebalancer/Lock.java + +``` +public class Lock extends StateModel { + private String lockName; + + public Lock(String lockName) { + this.lockName = lockName; + } + + @Transition(from = "RELEASED", to = "LOCKED") + public void lock(Message m, NotificationContext context) { + System.out.println(context.getManager().getInstanceName() + " acquired lock:" + lockName); + } + + @Transition(from = "LOCKED", to = "RELEASED") + public void release(Message m, NotificationContext context) { + System.out.println(context.getManager().getInstanceName() + " releasing lock:" + lockName); + } +} +``` + +Here is the factory to make the Lock class accessible. + +Location: incubator-helix/recipes/user-rebalanced-lock-manager/src/main/java/org/apache/helix/userdefinedrebalancer/LockFactory.java + +``` +public class LockFactory extends StateModelFactory { + @Override + public Lock createNewStateModel(String lockName) { + return new Lock(lockName); + } +} +``` + +Finally, here is the factory registration and the start of the participant: + +``` +participantManager = + HelixManagerFactory.getZKHelixManager(clusterName, participantName, InstanceType.PARTICIPANT, + zkAddress); +participantManager.getStateMachineEngine().registerStateModelFactory(stateModelName, + new LockFactory()); +participantManager.connect(); +``` + +### Start up the controller + +``` +controllerManager = + HelixControllerMain.startHelixController(zkAddress, config.clusterName, "controller", + HelixControllerMain.STANDALONE); +``` + +### Try it out +#### Building +``` +git clone https://git-wip-us.apache.org/repos/asf/incubator-helix.git +cd incubator-helix +mvn clean install package -DskipTests +cd recipes/user-rebalanced-lock-manager/target/user-rebalanced-lock-manager-pkg/bin +chmod +x * +./lock-manager-demo.sh +``` + +#### Output + +``` +./lock-manager-demo +STARTING localhost_12002 +STARTING localhost_12001 
+STARTING localhost_12003 +STARTED localhost_12001 +STARTED localhost_12003 +STARTED localhost_12002 +localhost_12003 acquired lock:lock-group_4 +localhost_12002 acquired lock:lock-group_8 +localhost_12001 acquired lock:lock-group_10 +localhost_12001 acquired lock:lock-group_3 +localhost_12001 acquired lock:lock-group_6 +localhost_12003 acquired lock:lock-group_0 +localhost_12002 acquired lock:lock-group_5 +localhost_12001 acquired lock:lock-group_9 +localhost_12002 acquired lock:lock-group_2 +localhost_12003 acquired lock:lock-group_7 +localhost_12003 acquired lock:lock-group_11 +localhost_12002 acquired lock:lock-group_1 +lockName acquired By +====================================== +lock-group_0 localhost_12003 +lock-group_1 localhost_12002 +lock-group_10 localhost_12001 +lock-group_11 localhost_12003 +lock-group_2 localhost_12002 +lock-group_3 localhost_12001 +lock-group_4 localhost_12003 +lock-group_5 localhost_12002 +lock-group_6 localhost_12001 +lock-group_7 localhost_12003 +lock-group_8 localhost_12002 +lock-group_9 localhost_12001 +Stopping the first participant +localhost_12001 Interrupted +localhost_12002 acquired lock:lock-group_3 +localhost_12003 acquired lock:lock-group_6 +localhost_12003 acquired lock:lock-group_10 +localhost_12002 acquired lock:lock-group_9 +lockName acquired By +====================================== +lock-group_0 localhost_12003 +lock-group_1 localhost_12002 +lock-group_10 localhost_12003 +lock-group_11 localhost_12003 +lock-group_2 localhost_12002 +lock-group_3 localhost_12002 +lock-group_4 localhost_12003 +lock-group_5 localhost_12002 +lock-group_6 localhost_12003 +lock-group_7 localhost_12003 +lock-group_8 localhost_12002 +lock-group_9 localhost_12002 +``` + +Notice that the lock assignment directly follows the assignment generated by the user-defined rebalancer both initially and after a participant is removed from the system. 
\ No newline at end of file diff --git a/src/site/markdown/tutorial_controller.md b/src/site/markdown/tutorial_controller.md index 17cd53254e..e391673c5c 100644 --- a/src/site/markdown/tutorial_controller.md +++ b/src/site/markdown/tutorial_controller.md @@ -83,7 +83,7 @@ If setting up a separate controller process is not viable, then it is possible t #### CONTROLLER AS A SERVICE -One of the cool feature we added in Helix was to use a set of controllers to manage a large number of clusters. +One of the cool features we added in Helix is to use a set of controllers to manage a large number of clusters. For example if you have X clusters to be managed, instead of deploying X*3 (3 controllers for fault tolerance) controllers for each cluster, one can deploy just 3 controllers. Each controller can manage X/3 clusters. If any controller fails, the remaining two will manage X/2 clusters. diff --git a/src/site/markdown/tutorial_messaging.md b/src/site/markdown/tutorial_messaging.md index f3fef109fa..c6fd3b2095 100644 --- a/src/site/markdown/tutorial_messaging.md +++ b/src/site/markdown/tutorial_messaging.md @@ -25,10 +25,10 @@ In this chapter, we\'ll learn about messaging, a convenient feature in Helix for Consider a search system where the index replica starts up and it does not have an index. A typical solution is to get the index from a common location, or to copy the index from another replica. -Helix provides a messaging api for intra-cluster communication between nodes in the system. Helix provides a mechanism to specify the message recipient in terms of resource, partition, and state rather than specifying hostnames. Helix ensures that the message is delivered to all of the required recipients. In this particular use case, the instance can specify the recipient criteria as all replicas of the desired partition to bootstrap. +Helix provides a messaging API for intra-cluster communication between nodes in the system. 
Helix provides a mechanism to specify the message recipient in terms of resource, partition, and state rather than specifying hostnames. Helix ensures that the message is delivered to all of the required recipients. In this particular use case, the instance can specify the recipient criteria as all replicas of the desired partition to bootstrap. Since Helix is aware of the global state of the system, it can send the message to appropriate nodes. Once the nodes respond, Helix provides the bootstrapping replica with all the responses. -This is a very generic api and can also be used to schedule various periodic tasks in the cluster, such as data backups, log cleanup, etc. +This is a very generic API and can also be used to schedule various periodic tasks in the cluster, such as data backups, log cleanup, etc. System Admins can also perform ad-hoc tasks, such as on-demand backups or a system command (such as rm -rf ;) across all nodes of the cluster ``` diff --git a/src/site/markdown/tutorial_participant.md b/src/site/markdown/tutorial_participant.md index 19e6f98b4b..cd4bcd2cae 100644 --- a/src/site/markdown/tutorial_participant.md +++ b/src/site/markdown/tutorial_participant.md @@ -19,7 +19,7 @@ under the License. # Helix Tutorial: Participant -In this chapter, we\'ll learn how to implement a PARTICIPANT, which is a primary functional component of a distributed system. +In this chapter, we\'ll learn how to implement a Participant, which is a primary functional component of a distributed system. ### Start the Helix agent @@ -43,6 +43,7 @@ The methods of the State Model will be called when controller sends transitions * MasterSlaveStateModelFactory * LeaderStandbyStateModelFactory * BootstrapHandler +* _An application defined state model factory_ ``` @@ -58,7 +59,7 @@ The methods of the State Model will be called when controller sends transitions manager.connect(); ``` -Helix doesn\'t know what it means to change from OFFLIN\-\-\>ONLINE or ONLINE\-\-\>OFFLINE. 
The following code snippet shows where you insert your system logic for these two state transitions. +Helix doesn\'t know what it means to change from OFFLINE\-\-\>ONLINE or ONLINE\-\-\>OFFLINE. The following code snippet shows where you insert your system logic for these two state transitions. ``` public class OnlineOfflineStateModelFactory extends diff --git a/src/site/markdown/tutorial_rebalance.md b/src/site/markdown/tutorial_rebalance.md index 1f5930dae6..f8f0511005 100644 --- a/src/site/markdown/tutorial_rebalance.md +++ b/src/site/markdown/tutorial_rebalance.md @@ -19,7 +19,7 @@ under the License. # Helix Tutorial: Rebalancing Algorithms -The placement of partitions in a distributed system is essential for the reliability and scalability of the system. For example, when a node fails, it is important that the partitions hosted on that node are reallocated evenly among the remaining nodes. Consistent hashing is one such algorithm that can satisfy this guarantee. Helix provides a variant of consistent hashing based on the RUSH algorithm. +The placement of partitions in a distributed system is essential for the reliability and scalability of the system. For example, when a node fails, it is important that the partitions hosted on that node are reallocated evenly among the remaining nodes. Consistent hashing is one such algorithm that can satisfy this guarantee. Helix provides a variant of consistent hashing based on the RUSH algorithm, among others. This means given a number of partitions, replicas and number of nodes, Helix does the automatic assignment of partition to nodes such that: @@ -32,25 +32,26 @@ Helix employs a rebalancing algorithm to compute the _ideal state_ of the system Helix makes it easy to perform this operation, while giving you control over the algorithm. In this section, we\'ll see how to implement the desired behavior. 
-Helix has three options for rebalancing, in increasing order of customization by the system builder: +Helix has four options for rebalancing, in increasing order of customization by the system builder: -* AUTO_REBALANCE -* AUTO -* CUSTOM +* FULL_AUTO +* SEMI_AUTO +* CUSTOMIZED +* USER_DEFINED ``` - |AUTO REBALANCE| AUTO | CUSTOM | - ----------------------------------------- - LOCATION | HELIX | APP | APP | - ----------------------------------------- - STATE | HELIX | HELIX | APP | - ----------------------------------------- + |FULL_AUTO | SEMI_AUTO | CUSTOMIZED| USER_DEFINED | + ---------------------------------------------------------| + LOCATION | HELIX | APP | APP | APP | + ---------------------------------------------------------| + STATE | HELIX | HELIX | APP | APP | + ---------------------------------------------------------- ``` -### AUTO_REBALANCE +### FULL_AUTO -When the idealstate mode is set to AUTO_REBALANCE, Helix controls both the location of the replica along with the state. This option is useful for applications where creation of a replica is not expensive. +When the rebalance mode is set to FULL_AUTO, Helix controls both the location of the replica along with the state. This option is useful for applications where creation of a replica is not expensive. For example, consider this system that uses a MasterSlave state model, with 3 partitions and 2 replicas in the ideal state. @@ -58,7 +59,7 @@ For example, consider this system that uses a MasterSlave state model, with 3 pa { "id" : "MyResource", "simpleFields" : { - "IDEAL_STATE_MODE" : "AUTO_REBALANCE", + "REBALANCE_MODE" : "FULL_AUTO", "NUM_PARTITIONS" : "3", "REPLICAS" : "2", "STATE_MODEL_DEF_REF" : "MasterSlave", @@ -103,9 +104,9 @@ If there are 3 nodes in the cluster, then Helix will balance the masters and sla Another typical example is evenly distributing a group of tasks among the currently healthy processes. 
For example, if there are 60 tasks and 4 nodes, Helix assigns 15 tasks to each node. When one node fails, Helix redistributes its 15 tasks to the remaining 3 nodes, resulting in a balanced 20 tasks per node. Similarly, if a node is added, Helix re-allocates 3 tasks from each of the 4 nodes to the 5th node, resulting in a balanced distribution of 12 tasks per node. -#### AUTO +#### SEMI_AUTO -When the application needs to control the placement of the replicas, use the AUTO idealstate mode. +When the application needs to control the placement of the replicas, use the SEMI_AUTO rebalance mode. Example: In the ideal state below, the partition \'MyResource_0\' is constrained to be placed only on node1 or node2. The choice of _state_ is still controlled by Helix. That means MyResource_0.MASTER could be on node1 and MyResource_0.SLAVE on node2, or vice-versa but neither would be placed on node3. @@ -113,7 +114,7 @@ Example: In the ideal state below, the partition \'MyResource_0\' is constrained { "id" : "MyResource", "simpleFields" : { - "IDEAL_STATE_MODE" : "AUTO", + "REBALANCE_MODE" : "SEMI_AUTO", "NUM_PARTITIONS" : "3", "REPLICAS" : "2", "STATE_MODEL_DEF_REF" : "MasterSlave", @@ -130,11 +131,11 @@ Example: In the ideal state below, the partition \'MyResource_0\' is constrained The MasterSlave state model requires that a partition has exactly one MASTER at all times, and the other replicas should be SLAVEs. In this simple example with 2 replicas per partition, there would be one MASTER and one SLAVE. Upon failover, a SLAVE has to assume mastership, and a new SLAVE will be generated. -In this mode when node1 fails, unlike in AUTO-REBALANCE mode the partition is _not_ moved from node1 to node3. Instead, Helix will decide to change the state of MyResource_0 on node2 from SLAVE to MASTER, based on the system constraints. +In this mode when node1 fails, unlike in FULL_AUTO mode the partition is _not_ moved from node1 to node3.
Instead, Helix will decide to change the state of MyResource_0 on node2 from SLAVE to MASTER, based on the system constraints. -#### CUSTOM +#### CUSTOMIZED -Finally, Helix offers a third mode called CUSTOM, in which the application controls the placement _and_ state of each replica. The application needs to implement a callback interface that Helix invokes when the cluster state changes. +Helix offers a third mode called CUSTOMIZED, in which the application controls the placement _and_ state of each replica. The application needs to implement a callback interface that Helix invokes when the cluster state changes. Within this callback, the application can recompute the idealstate. Helix will then issue appropriate transitions such that _Idealstate_ and _Currentstate_ converges. Here\'s an example, again with 3 partitions, 2 replicas per partition, and the MasterSlave state model: @@ -143,7 +144,7 @@ Here\'s an example, again with 3 partitions, 2 replicas per partition, and the M { "id" : "MyResource", "simpleFields" : { - "IDEAL_STATE_MODE" : "CUSTOM", + "REBALANCE_MODE" : "CUSTOMIZED", "NUM_PARTITIONS" : "3", "REPLICAS" : "2", "STATE_MODEL_DEF_REF" : "MasterSlave", @@ -166,3 +167,11 @@ Here\'s an example, again with 3 partitions, 2 replicas per partition, and the M ``` Suppose the current state of the system is 'MyResource_0' -> {N1:MASTER, N2:SLAVE} and the application changes the ideal state to 'MyResource_0' -> {N1:SLAVE,N2:MASTER}. While the application decides which node is MASTER and which is SLAVE, Helix will not blindly issue MASTER-->SLAVE to N1 and SLAVE-->MASTER to N2 in parallel, since that might result in a transient state where both N1 and N2 are masters, which violates the MasterSlave constraint that there is exactly one MASTER at a time. Helix will first issue MASTER-->SLAVE to N1 and after it is completed, it will issue SLAVE-->MASTER to N2. 
+ +#### USER_DEFINED + +For maximum flexibility, Helix exposes an interface that can allow applications to plug in custom rebalancing logic. By providing the name of a class that implements the Rebalancer interface, Helix will automatically call the contained method whenever there is a change to the live participants in the cluster. For more, see [User-Defined Rebalancer](./tutorial_user_def_rebalancer.html). + +#### Backwards Compatibility + +In previous versions, FULL_AUTO was called AUTO_REBALANCE and SEMI_AUTO was called AUTO. Furthermore, they were presented as the IDEAL_STATE_MODE. Helix supports both IDEAL_STATE_MODE and REBALANCE_MODE, but IDEAL_STATE_MODE is now deprecated and may be phased out in future versions. diff --git a/src/site/markdown/tutorial_spectator.md b/src/site/markdown/tutorial_spectator.md index a5b9a0eec3..bdf50a7d0d 100644 --- a/src/site/markdown/tutorial_spectator.md +++ b/src/site/markdown/tutorial_spectator.md @@ -19,11 +19,11 @@ under the License. # Helix Tutorial: Spectator -Next, we\'ll learn how to implement a SPECTATOR. Typically, a spectator needs to react to changes within the distributed system. Examples: a client that needs to know where to send a request, a topic consumer in a consumer group. The spectator is automatically informed of changes in the _external state_ of the cluster, but it does not have to add any code to keep track of other components in the system. +Next, we\'ll learn how to implement a Spectator. Typically, a spectator needs to react to changes within the distributed system. Examples: a client that needs to know where to send a request, a topic consumer in a consumer group. The spectator is automatically informed of changes in the _external state_ of the cluster, but it does not have to add any code to keep track of other components in the system. ### Start the Helix agent -Same as for a PARTICIPANT, The Helix agent is the common component that connects each system component with the controller. 
+Same as for a Participant, the Helix agent is the common component that connects each system component with the controller. It requires the following parameters: diff --git a/src/site/markdown/tutorial_user_def_rebalancer.md b/src/site/markdown/tutorial_user_def_rebalancer.md new file mode 100644 index 0000000000..6d07878949 --- /dev/null +++ b/src/site/markdown/tutorial_user_def_rebalancer.md @@ -0,0 +1,196 @@ + +# Tutorial: User-Defined Rebalancing + +Even though Helix can compute both the location and the state of replicas internally using a default fully-automatic rebalancer, specific applications may require rebalancing strategies that optimize for different requirements. Thus, Helix allows applications to plug in arbitrary rebalancer algorithms that implement a provided interface. One of the main design goals of Helix is to provide maximum flexibility to any distributed application. Thus, it allows applications to fully implement the rebalancer, which is the core constraint solver in the system, if the application developer so chooses. + +Whenever the state of the cluster changes, as is the case when participants join or leave the cluster, Helix automatically calls the rebalancer to compute a new mapping of all the replicas in the resource. When using a pluggable rebalancer, the only required step is to register it with Helix. Subsequently, no additional bootstrapping steps are necessary. Helix uses reflection to look up and load the class dynamically at runtime. As a result, it is also technically possible to change the rebalancing strategy used at any time. 
+ +The Rebalancer interface is as follows: + +``` +ResourceMapping computeResourceMapping(final Resource resource, + final IdealState currentIdealState, final CurrentStateOutput currentStateOutput, + final ClusterDataCache clusterData); +``` +The first parameter is the resource to rebalance, the second is pre-existing ideal mappings, the third is a snapshot of the actual placements and state assignments, and the fourth is a full cache of all of the cluster data available to Helix. Internally, Helix implements the same interface for its own rebalancing routines, so a user-defined rebalancer will be cognizant of the same information about the cluster as an internal implementation. Helix strives to provide applications the ability to implement algorithms that may require a large portion of the entire state of the cluster to make the best placement and state assignment decisions possible. + +A ResourceMapping is a full representation of the location and the state of each replica of each partition of a given resource. This is a simple representation of the placement that the algorithm believes is the best possible. If the placement meets all defined constraints, this is what will become the actual state of the distributed system. + +### Specifying a Rebalancer +For implementations that set up the cluster through existing code, the following HelixAdmin calls will update the Rebalancer class: + +``` +IdealState idealState = helixAdmin.getResourceIdealState(clusterName, resourceName); +idealState.setRebalanceMode(RebalanceMode.USER_DEFINED); +idealState.setRebalancerClassName(className); +helixAdmin.setResourceIdealState(clusterName, resourceName, idealState); +``` +There are two key fields to set to specify that a pluggable rebalancer should be used. First, the rebalance mode should be set to USER_DEFINED, and second the rebalancer class name should be set to a class that implements Rebalancer and is within the scope of the project. 
The class name is a fully-qualified class name consisting of its package and its name. Without specification of the USER_DEFINED mode, the user-defined rebalancer class will not be used even if specified. Furthermore, Helix will not attempt to rebalance the resources through its standard routines if its mode is USER_DEFINED, regardless of whether or not a rebalancer class is registered. + +Alternatively, the rebalancer class name can be specified in a YAML file representing the cluster configuration. The requirements are the same, but the representation is more compact. Below are the first few lines of an example YAML file. To see a full YAML specification, see the [YAML tutorial](./tutorial_yaml.html). + +``` +clusterName: lock-manager-custom-rebalancer # unique name for the cluster +resources: + - name: lock-group # unique resource name + rebalancer: # we will provide our own rebalancer + mode: USER_DEFINED + class: domain.project.helix.rebalancer.UserDefinedRebalancerClass +... +``` + +### Example +We demonstrate plugging in a simple user-defined rebalancer as part of a revisit of the [distributed lock manager](./recipes/user_def_rebalancer.html) example. It includes a functional Rebalancer implementation, as well as the entire YAML file used to define the cluster. + +Consider the case where partitions are locks in a lock manager and 6 locks are to be distributed evenly to a set of participants, and only one participant can hold each lock. We can define a rebalancing algorithm that simply takes the modulus of the lock number and the number of participants to evenly distribute the locks across participants. Helix allows capping the number of partitions a participant can accept, but since locks are lightweight, we do not need to define a restriction in this case. The following is a succinct implementation of this algorithm. 
+ +``` +@Override +public ResourceAssignment computeResourceMapping(Resource resource, IdealState currentIdealState, + CurrentStateOutput currentStateOutput, ClusterDataCache clusterData) { + // Initialize an empty mapping of locks to participants + ResourceAssignment assignment = new ResourceAssignment(resource.getResourceName()); + + // Get the list of live participants in the cluster + List liveParticipants = new ArrayList(clusterData.getLiveInstances().keySet()); + + // Get the state model (should be a simple lock/unlock model) and the highest-priority state + String stateModelName = currentIdealState.getStateModelDefRef(); + StateModelDefinition stateModelDef = clusterData.getStateModelDef(stateModelName); + if (stateModelDef.getStatesPriorityList().size() < 1) { + LOG.error("Invalid state model definition. There should be at least one state."); + return assignment; + } + String lockState = stateModelDef.getStatesPriorityList().get(0); + + // Count the number of participants allowed to lock each lock + String stateCount = stateModelDef.getNumInstancesPerState(lockState); + int lockHolders = 0; + try { + // a numeric value is a custom-specified number of participants allowed to lock the lock + lockHolders = Integer.parseInt(stateCount); + } catch (NumberFormatException e) { + LOG.error("Invalid state model definition. The lock state does not have a valid count"); + return assignment; + } + + // Fairly assign the lock state to the participants using a simple mod-based sequential + // assignment. For instance, if each lock can be held by 3 participants, lock 0 would be held + // by participants (0, 1, 2), lock 1 would be held by (1, 2, 3), and so on, wrapping around the + // number of participants as necessary. + // This assumes a simple lock-unlock model where the only state of interest is which nodes have + // acquired each lock. 
+ int i = 0; + for (Partition partition : resource.getPartitions()) { + Map replicaMap = new HashMap(); + for (int j = i; j < i + lockHolders; j++) { + int participantIndex = j % liveParticipants.size(); + String participant = liveParticipants.get(participantIndex); + // enforce that a participant can only have one instance of a given lock + if (!replicaMap.containsKey(participant)) { + replicaMap.put(participant, lockState); + } + } + assignment.addReplicaMap(partition, replicaMap); + i++; + } + return assignment; +} +``` + +Here is the ResourceMapping emitted by the user-defined rebalancer for a 3-participant system whenever there is a change to the set of participants. + +* Participant_A joins + +``` +{ + "lock_0": { "Participant_A": "LOCKED"}, + "lock_1": { "Participant_A": "LOCKED"}, + "lock_2": { "Participant_A": "LOCKED"}, + "lock_3": { "Participant_A": "LOCKED"}, + "lock_4": { "Participant_A": "LOCKED"}, + "lock_5": { "Participant_A": "LOCKED"}, +} +``` + +A ResourceMapping is a mapping for each resource of partition to the participant serving each replica and the state of each replica. The state model is a simple LOCKED/RELEASED model, so participant A holds all lock partitions in the LOCKED state. + +* Participant_B joins + +``` +{ + "lock_0": { "Participant_A": "LOCKED"}, + "lock_1": { "Participant_B": "LOCKED"}, + "lock_2": { "Participant_A": "LOCKED"}, + "lock_3": { "Participant_B": "LOCKED"}, + "lock_4": { "Participant_A": "LOCKED"}, + "lock_5": { "Participant_B": "LOCKED"}, +} +``` + +Now that there are two participants, the simple mod-based function assigns every other lock to the second participant. On any system change, the rebalancer is invoked so that the application can define how to redistribute its resources. 
+ +* Participant_C joins (steady state) + +``` +{ + "lock_0": { "Participant_A": "LOCKED"}, + "lock_1": { "Participant_B": "LOCKED"}, + "lock_2": { "Participant_C": "LOCKED"}, + "lock_3": { "Participant_A": "LOCKED"}, + "lock_4": { "Participant_B": "LOCKED"}, + "lock_5": { "Participant_C": "LOCKED"}, +} +``` + +This is the steady state of the system. Notice that four of the six locks now have a different owner. That is because of the naïve modulus-based assignment approach used by the user-defined rebalancer. However, the interface is flexible enough to allow you to employ consistent hashing or any other scheme if minimal movement is a system requirement. + +* Participant_B fails + +``` +{ + "lock_0": { "Participant_A": "LOCKED"}, + "lock_1": { "Participant_C": "LOCKED"}, + "lock_2": { "Participant_A": "LOCKED"}, + "lock_3": { "Participant_C": "LOCKED"}, + "lock_4": { "Participant_A": "LOCKED"}, + "lock_5": { "Participant_C": "LOCKED"}, +} +``` + +On any node failure, as in the case of node addition, the rebalancer is invoked automatically so that it can generate a new mapping as a response to the change. Helix ensures that the Rebalancer has the opportunity to reassign locks as required by the application. + +* Participant_B (or the replacement for the original Participant_B) rejoins + +``` +{ + "lock_0": { "Participant_A": "LOCKED"}, + "lock_1": { "Participant_B": "LOCKED"}, + "lock_2": { "Participant_C": "LOCKED"}, + "lock_3": { "Participant_A": "LOCKED"}, + "lock_4": { "Participant_B": "LOCKED"}, + "lock_5": { "Participant_C": "LOCKED"}, +} +``` + +The rebalancer was invoked once again and the resulting ResourceMapping reflects the steady state. 
+ +### Caveats +- The rebalancer class must be available at runtime, or else Helix will not attempt to rebalance at all \ No newline at end of file diff --git a/src/site/markdown/tutorial_yaml.md b/src/site/markdown/tutorial_yaml.md new file mode 100644 index 0000000000..1524c9d91e --- /dev/null +++ b/src/site/markdown/tutorial_yaml.md @@ -0,0 +1,98 @@ + + +# Helix Tutorial: YAML Cluster Setup + +As an alternative to using Helix Admin to set up the cluster, its resources, constraints, and the state model, Helix supports bootstrapping a cluster configuration based on a YAML file. Below is an annotated example of such a file for a simple distributed lock manager where a lock can only be LOCKED or RELEASED, and each lock only allows a single participant to hold it in the LOCKED state. + +``` +clusterName: lock-manager-custom-rebalancer # unique name for the cluster (required) +resources: + - name: lock-group # unique resource name (required) + rebalancer: # required + mode: USER_DEFINED # required - USER_DEFINED means we will provide our own rebalancer + class: org.apache.helix.userdefinedrebalancer.LockManagerRebalancer # required for USER_DEFINED + partitions: + count: 12 # number of partitions for the resource (default is 1) + replicas: 1 # number of replicas per partition (default is 1) + stateModel: + name: lock-unlock # model name (required) + states: [LOCKED, RELEASED, DROPPED] # the list of possible states (required if model not built-in) + transitions: # the list of possible transitions (required if model not built-in) + - name: Unlock + from: LOCKED + to: RELEASED + - name: Lock + from: RELEASED + to: LOCKED + - name: DropLock + from: LOCKED + to: DROPPED + - name: DropUnlock + from: RELEASED + to: DROPPED + - name: Undrop + from: DROPPED + to: RELEASED + initialState: RELEASED # (required if model not built-in) + constraints: + state: + counts: # maximum number of replicas of a partition that can be in each state (required if model not built-in) + - name: 
LOCKED + count: "1" + - name: RELEASED + count: "-1" + - name: DROPPED + count: "-1" + priorityList: [LOCKED, RELEASED, DROPPED] # states in order of priority (all priorities equal if not specified) + transition: # transitions priority to enforce order that transitions occur + priorityList: [Unlock, Lock, Undrop, DropUnlock, DropLock] # all priorities equal if not specified +participants: # list of nodes that can serve replicas (optional if dynamic joining is active, required otherwise) + - name: localhost_12001 + host: localhost + port: 12001 + - name: localhost_12002 + host: localhost + port: 12002 + - name: localhost_12003 + host: localhost + port: 12003 +``` + +Using a file like the one above, the cluster can be set up either with the command line: + +``` +incubator-helix/helix-core/target/helix-core/pkg/bin/YAMLClusterSetup.sh localhost:2199 lock-manager-config.yaml +``` + +or with code: + +``` +YAMLClusterSetup setup = new YAMLClusterSetup(zkAddress); +InputStream input = + Thread.currentThread().getContextClassLoader() + .getResourceAsStream("lock-manager-config.yaml"); +YAMLClusterSetup.YAMLClusterConfig config = setup.setupCluster(input); +``` + +Some notes: + +- A rebalancer class is only required for the USER_DEFINED mode. It is ignored otherwise. + +- Built-in state models, like OnlineOffline, LeaderStandby, and MasterSlave, or state models that have already been added only require a name for stateModel. If partition and/or replica counts are not provided, a value of 1 is assumed. 
\ No newline at end of file diff --git a/src/site/site.xml b/src/site/site.xml index e44a43ed2b..e9a5cce5d1 100644 --- a/src/site/site.xml +++ b/src/site/site.xml @@ -72,6 +72,7 @@ + From 1d3c32ed264caf5cd3669ec6054a8518cce34a61 Mon Sep 17 00:00:00 2001 From: zzhang Date: Mon, 2 Sep 2013 22:46:51 -0700 Subject: [PATCH 011/113] [HELIX-227] Create a wiki page for helix admin api, rb=13933 --- src/site/markdown/tutorial_admin.md | 500 ++++++++++++++++++++-------- 1 file changed, 368 insertions(+), 132 deletions(-) diff --git a/src/site/markdown/tutorial_admin.md b/src/site/markdown/tutorial_admin.md index 57f34fc756..72c1f13984 100644 --- a/src/site/markdown/tutorial_admin.md +++ b/src/site/markdown/tutorial_admin.md @@ -19,149 +19,385 @@ under the License. # Helix Tutorial: Admin Operations -Helix provides interfaces for the operator to administer the cluster. For convenience, there is a command line interface as well as a REST interface. +Helix provides a set of admin api for cluster management operations. They are supported via: -### Helix Admin operations +* _Java API_ +* _Commandline interface_ +* _REST interface via helix-admin-webapp_ -First, make sure you get to the command-line tool, or include it in your shell PATH. 
+### Java API +See interface [_org.apache.helix.HelixAdmin_](http://helix.incubator.apache.org/apidocs/reference/org/apache/helix/HelixAdmin.html) -``` -cd helix/helix-core/target/helix-core-pkg/bin -``` - -Get help - -``` -./helix-admin.sh --help -``` - -All other commands have this form: - -``` -./helix-admin.sh --zkSvr -``` - -Now, here are the admin commands: - -Add a new cluster - -``` - --addCluster -``` - -Add a new Instance to a cluster - -``` - --addNode -``` - -Add a State model to a cluster -_WE NEED A SPEC FOR A VALID STATE MODEL_ +### Command-line interface +The command-line tool comes with helix-core package: -``` - --addStateModelDef > -``` - -Add a resource to a cluster +Get the command-line tool: -``` - --addResource +``` + - git clone https://git-wip-us.apache.org/repos/asf/incubator-helix.git + - cd incubator-helix + - ./build + - cd helix-core/target/helix-core-pkg/bin + - chmod +x *.sh ``` -Upload an IdealState (Partition to Node Mapping) -_WE NEED A SPEC FOR A VALID IDEAL STATE_ +Get help: ``` - --addIdealState -``` - -Delete a cluster - -``` - --dropCluster -``` - -Delete a resource (drop an existing resource from a cluster) - + - ./helix-admin.sh --help ``` - --dropResource -``` - -Drop an existing instance from a cluster - -``` - --dropNode -``` - -Enable/disable the entire cluster. This will pause the controller, which means no transitions will be trigger, but the existing nodes in the cluster continue to function, but without any management by the controller. - -``` - --enableCluster -``` - -Enable/disable an instance. Useful to take a node out of the cluster for maintenance/upgrade. 
-``` - --enableInstance -``` - -Enable/disable a partition - -``` - --enablePartition -``` - -Query info of a cluster - -``` - --listClusterInfo -``` - -List existing clusters (remember, Helix can manage multiple clusters) - -``` - --listClusters -``` - -Query info of a single Instance in a cluster - -``` - --listInstanceInfo -``` - -List instances in a cluster - -``` - --listInstances -``` - -Query info of a partition - -``` - --listPartitionInfo -``` - -Query info of a resource - -``` - --listResourceInfo -``` - -List resources hosted in a cluster - -``` - --listResources -``` - -Query info of a state model in a cluster - -``` - --listStateModel -``` - -Query info of state models in a cluster +All other commands have this form: ``` - --listStateModels -``` + ./helix-admin.sh --zkSvr +``` + +Admin commands and brief description: + +| Command syntax | Description | +| -------------- | ----------- | +| _\-\-activateCluster \_ | Enable/disable a cluster in distributed controller mode | +| _\-\-addCluster \_ | Add a new cluster | +| _\-\-addIdealState \_ | Add an ideal state to a cluster | +| _\-\-addInstanceTag \_ | Add a tag to an instance | +| _\-\-addNode \_ | Add an instance to a cluster | +| _\-\-addResource \_ | Add a new resource to a cluster | +| _\-\-addResourceProperty \_ | Add a resource property | +| _\-\-addStateModelDef \_ | Add a State model definition to a cluster | +| _\-\-dropCluster \_ | Delete a cluster | +| _\-\-dropNode \_ | Remove a node from a cluster | +| _\-\-dropResource \_ | Remove an existing resource from a cluster | +| _\-\-enableCluster \_ | Enable/disable a cluster | +| _\-\-enableInstance \_ | Enable/disable an instance | +| _\-\-enablePartition \_ | Enable/disable a partition | +| _\-\-getConfig \_ | Get user configs | +| _\-\-getConstraints \_ | Get constraints | +| _\-\-help_ | print help information | +| _\-\-instanceGroupTag \_ | Specify instance group tag, used with rebalance command | +| _\-\-listClusterInfo \_ | Show 
information of a cluster | +| _\-\-listClusters_ | List all clusters | +| _\-\-listInstanceInfo \_ | Show information of an instance | +| _\-\-listInstances \_ | List all instances in a cluster | +| _\-\-listPartitionInfo \_ | Show information of a partition | +| _\-\-listResourceInfo \_ | Show information of a resource | +| _\-\-listResources \_ | List all resources in a cluster | +| _\-\-listStateModel \_ | Show information of a state model | +| _\-\-listStateModels \_ | List all state models in a cluster | +| _\-\-maxPartitionsPerNode \_ | Specify the max partitions per instance, used with addResourceGroup command | +| _\-\-rebalance \_ | Rebalance a resource | +| _\-\-removeConfig \_ | Remove user configs | +| _\-\-removeConstraint \_ | Remove a constraint | +| _\-\-removeInstanceTag \_ | Remove a tag from an instance | +| _\-\-removeResourceProperty \_ | Remove a resource property | +| _\-\-resetInstance \_ | Reset all erroneous partitions on an instance | +| _\-\-resetPartition \_ | Reset an erroneous partition | +| _\-\-resetResource \_ | Reset all erroneous partitions of a resource | +| _\-\-setConfig \_ | Set user configs | +| _\-\-setConstraint \_ | Set a constraint | +| _\-\-swapInstance \_ | Swap an old instance with a new instance | +| _\-\-zkSvr \_ | Provide zookeeper address | + +### REST interface + +The REST interface comes with helix-admin-webapp package: + +``` + - git clone https://git-wip-us.apache.org/repos/asf/incubator-helix.git + - cd incubator-helix + - ./build + - cd helix-admin-webapp/target/helix-admin-webapp-pkg/bin + - chmod +x *.sh + - ./run-rest-admin.sh --zkSvr --port // make sure zookeeper is running +``` + +#### URL and supported methods + +* _/clusters_ + * List all clusters + + ``` + curl http://localhost:8100/clusters + ``` + + * Add a cluster + + ``` + curl -d 'jsonParameters={"command":"addCluster","clusterName":"MyCluster"}' -H "Content-Type: application/json" http://localhost:8100/clusters + ``` + +* _/clusters/{clusterName}_ 
+ * List cluster information + + ``` + curl http://localhost:8100/clusters/MyCluster + ``` + + * Enable/disable a cluster in distributed controller mode + + ``` + curl -d 'jsonParameters={"command":"activateCluster","grandCluster":"MyControllerCluster","enabled":"true"}' -H "Content-Type: application/json" http://localhost:8100/clusters/MyCluster + ``` + + * Remove a cluster + + ``` + curl -X DELETE http://localhost:8100/clusters/MyCluster + ``` + +* _/clusters/{clusterName}/resourceGroups_ + * List all resources in a cluster + + ``` + curl http://localhost:8100/clusters/MyCluster/resourceGroups + ``` + + * Add a resource to cluster + + ``` + curl -d 'jsonParameters={"command":"addResource","resourceGroupName":"MyDB","partitions":"8","stateModelDefRef":"MasterSlave" }' -H "Content-Type: application/json" http://localhost:8100/clusters/MyCluster/resourceGroups + ``` + +* _/clusters/{clusterName}/resourceGroups/{resourceName}_ + * List resource information + + ``` + curl http://localhost:8100/clusters/MyCluster/resourceGroups/MyDB + ``` + + * Drop a resource + + ``` + curl -X DELETE http://localhost:8100/clusters/MyCluster/resourceGroups/MyDB + ``` + + * Reset all erroneous partitions of a resource + + ``` + curl -d 'jsonParameters={"command":"resetResource"}' -H "Content-Type: application/json" http://localhost:8100/clusters/MyCluster/resourceGroups/MyDB + ``` + +* _/clusters/{clusterName}/resourceGroups/{resourceName}/idealState_ + * Rebalance a resource + + ``` + curl -d 'jsonParameters={"command":"rebalance","replicas":"3"}' -H "Content-Type: application/json" http://localhost:8100/clusters/MyCluster/resourceGroups/MyDB/idealState + ``` + + * Add an ideal state + + ``` + echo jsonParameters={ + "command":"addIdealState" + }&newIdealState={ + "id" : "MyDB", + "simpleFields" : { + "IDEAL_STATE_MODE" : "AUTO", + "NUM_PARTITIONS" : "8", + "REBALANCE_MODE" : "SEMI_AUTO", + "REPLICAS" : "0", + "STATE_MODEL_DEF_REF" : "MasterSlave", + "STATE_MODEL_FACTORY_NAME" : 
"DEFAULT" + }, + "listFields" : { + }, + "mapFields" : { + "MyDB_0" : { + "localhost_1001" : "MASTER", + "localhost_1002" : "SLAVE" + } + } + } + > newIdealState.json + curl -d @'./newIdealState.json' -H 'Content-Type: application/json' http://localhost:8100/clusters/MyCluster/resourceGroups/MyDB/idealState + ``` + + * Add resource property + + ``` + curl -d 'jsonParameters={"command":"addResourceProperty","REBALANCE_TIMER_PERIOD":"500"}' -H "Content-Type: application/json" http://localhost:8100/clusters/MyCluster/resourceGroups/MyDB/idealState + ``` + +* _/clusters/{clusterName}/resourceGroups/{resourceName}/externalView_ + * Show resource external view + + ``` + curl http://localhost:8100/clusters/MyCluster/resourceGroups/MyDB/externalView + ``` +* _/clusters/{clusterName}/instances_ + * List all instances + + ``` + curl http://localhost:8100/clusters/MyCluster/instances + ``` + + * Add an instance + + ``` + curl -d 'jsonParameters={"command":"addInstance","instanceNames":"localhost_1001"}' -H "Content-Type: application/json" http://localhost:8100/clusters/MyCluster/instances + ``` + + * Swap an instance + + ``` + curl -d 'jsonParameters={"command":"swapInstance","oldInstance":"localhost_1001", "newInstance":"localhost_1002"}' -H "Content-Type: application/json" http://localhost:8100/clusters/MyCluster/instances + ``` +* _/clusters/{clusterName}/instances/{instanceName}_ + * Show instance information + + ``` + curl http://localhost:8100/clusters/MyCluster/instances/localhost_1001 + ``` + + * Enable/disable an instance + + ``` + curl -d 'jsonParameters={"command":"enableInstance","enabled":"false"}' -H "Content-Type: application/json" http://localhost:8100/clusters/MyCluster/instances/localhost_1001 + ``` + + * Drop an instance + + ``` + curl -X DELETE http://localhost:8100/clusters/MyCluster/instances/localhost_1001 + ``` + + * Disable/enable partitions on an instance + + ``` + curl -d 'jsonParameters={"command":"enablePartition","resource": 
"MyDB","partition":"MyDB_0", "enabled" : "false"}' -H "Content-Type: application/json" http://localhost:8100/clusters/MyCluster/instances/localhost_1001 + ``` + + * Reset an erroneous partition on an instance + + ``` + curl -d 'jsonParameters={"command":"resetPartition","resource": "MyDB","partition":"MyDB_0"}' -H "Content-Type: application/json" http://localhost:8100/clusters/MyCluster/instances/localhost_1001 + ``` + + * Reset all erroneous partitions on an instance + + ``` + curl -d 'jsonParameters={"command":"resetInstance"}' -H "Content-Type: application/json" http://localhost:8100/clusters/MyCluster/instances/localhost_1001 + ``` + +* _/clusters/{clusterName}/configs_ + * Get user cluster level config + + ``` + curl http://localhost:8100/clusters/MyCluster/configs/cluster + ``` + + * Set user cluster level config + + ``` + curl -d 'jsonParameters={"command":"setConfig","configs":"key1=value1,key2=value2"}' -H "Content-Type: application/json" http://localhost:8100/clusters/MyCluster/configs/cluster + ``` + + * Remove user cluster level config + + ``` + curl -d 'jsonParameters={"command":"removeConfig","configs":"key1,key2"}' -H "Content-Type: application/json" http://localhost:8100/clusters/MyCluster/configs/cluster + ``` + + * Get/set/remove user participant level config + + ``` + curl -d 'jsonParameters={"command":"setConfig","configs":"key1=value1,key2=value2"}' -H "Content-Type: application/json" http://localhost:8100/clusters/MyCluster/configs/participant/localhost_1001 + ``` + + * Get/set/remove resource level config + + ``` + curl -d 'jsonParameters={"command":"setConfig","configs":"key1=value1,key2=value2"}' -H "Content-Type: application/json" http://localhost:8100/clusters/MyCluster/configs/resource/MyDB + ``` + +* _/clusters/{clusterName}/controller_ + * Show controller information + + ``` + curl http://localhost:8100/clusters/MyCluster/Controller + ``` + + * Enable/disable cluster + + ``` + curl -d 
'jsonParameters={"command":"enableCluster","enabled":"false"}' -H "Content-Type: application/json" http://localhost:8100/clusters/MyCluster/Controller + ``` + +* _/zkPath/{path}_ + * Get information for zookeeper path + + ``` + curl http://localhost:8100/zkPath/MyCluster + ``` + +* _/clusters/{clusterName}/StateModelDefs_ + * Show all state model definitions + + ``` + curl http://localhost:8100/clusters/MyCluster/StateModelDefs + ``` + + * Add a state model definition + + ``` + echo jsonParameters={ + "command":"addStateModelDef" + }&newStateModelDef={ + "id" : "OnlineOffline", + "simpleFields" : { + "INITIAL_STATE" : "OFFLINE" + }, + "listFields" : { + "STATE_PRIORITY_LIST" : [ "ONLINE", "OFFLINE", "DROPPED" ], + "STATE_TRANSITION_PRIORITYLIST" : [ "OFFLINE-ONLINE", "ONLINE-OFFLINE", "OFFLINE-DROPPED" ] + }, + "mapFields" : { + "DROPPED.meta" : { + "count" : "-1" + }, + "OFFLINE.meta" : { + "count" : "-1" + }, + "OFFLINE.next" : { + "DROPPED" : "DROPPED", + "ONLINE" : "ONLINE" + }, + "ONLINE.meta" : { + "count" : "R" + }, + "ONLINE.next" : { + "DROPPED" : "OFFLINE", + "OFFLINE" : "OFFLINE" + } + } + } + > newStateModelDef.json + curl -d @'./newStateModelDef.json' -H 'Content-Type: application/json' http://localhost:8100/clusters/MyCluster/StateModelDefs + ``` + +* _/clusters/{clusterName}/StateModelDefs/{stateModelDefName}_ + * Show a state model definition + + ``` + curl http://localhost:8100/clusters/MyCluster/StateModelDefs/OnlineOffline + ``` + +* _/clusters/{clusterName}/constraints/{constraintType}_ + * Show all constraints + + ``` + curl http://localhost:8100/clusters/MyCluster/constraints/MESSAGE_CONSTRAINT + ``` + + * Set a constraint + + ``` + curl -d 'jsonParameters={"constraintAttributes":"RESOURCE=MyDB,CONSTRAINT_VALUE=1"}' -H "Content-Type: application/json" http://localhost:8100/clusters/MyCluster/constraints/MESSAGE_CONSTRAINT/MyConstraint + ``` + + * Remove a constraint + + ``` + curl -X DELETE 
http://localhost:8100/clusters/MyCluster/constraints/MESSAGE_CONSTRAINT/MyConstraint + ``` From 19c684174e7d9f6bb84a7feab255a505c6f6ad2c Mon Sep 17 00:00:00 2001 From: Kishore Gopalakrishna Date: Tue, 3 Sep 2013 09:24:56 -0700 Subject: [PATCH 012/113] [HELIX-215] Adding new recipe on how to write a custom rebalancer --- helix-core/pom.xml | 9 + .../stages/BestPossibleStateCalcStage.java | 8 +- .../org/apache/helix/model/IdealState.java | 14 +- .../apache/helix/tools/YAMLClusterSetup.java | 287 ++++++++++++++++++ pom.xml | 10 + recipes/pom.xml | 1 + recipes/user-defined-rebalancer/README.md | 254 ++++++++++++++++ recipes/user-defined-rebalancer/pom.xml | 139 +++++++++ .../src/main/config/log4j.properties | 31 ++ .../helix/userdefinedrebalancer/Lock.java | 48 +++ .../userdefinedrebalancer/LockFactory.java | 34 +++ .../LockManagerDemo.java | 192 ++++++++++++ .../LockManagerRebalancer.java | 84 +++++ .../userdefinedrebalancer/LockProcess.java | 79 +++++ .../main/resources/lock-manager-config.yaml | 69 +++++ .../src/test/conf/testng.xml | 27 ++ 16 files changed, 1279 insertions(+), 7 deletions(-) create mode 100644 helix-core/src/main/java/org/apache/helix/tools/YAMLClusterSetup.java create mode 100644 recipes/user-defined-rebalancer/README.md create mode 100644 recipes/user-defined-rebalancer/pom.xml create mode 100644 recipes/user-defined-rebalancer/src/main/config/log4j.properties create mode 100644 recipes/user-defined-rebalancer/src/main/java/org/apache/helix/userdefinedrebalancer/Lock.java create mode 100644 recipes/user-defined-rebalancer/src/main/java/org/apache/helix/userdefinedrebalancer/LockFactory.java create mode 100644 recipes/user-defined-rebalancer/src/main/java/org/apache/helix/userdefinedrebalancer/LockManagerDemo.java create mode 100644 recipes/user-defined-rebalancer/src/main/java/org/apache/helix/userdefinedrebalancer/LockManagerRebalancer.java create mode 100644 
recipes/user-defined-rebalancer/src/main/java/org/apache/helix/userdefinedrebalancer/LockProcess.java create mode 100644 recipes/user-defined-rebalancer/src/main/resources/lock-manager-config.yaml create mode 100644 recipes/user-defined-rebalancer/src/test/conf/testng.xml diff --git a/helix-core/pom.xml b/helix-core/pom.xml index af04d858c4..22d1b2c843 100644 --- a/helix-core/pom.xml +++ b/helix-core/pom.xml @@ -150,6 +150,11 @@ under the License. guava r09 + + org.yaml + snakeyaml + 1.12 + @@ -213,6 +218,10 @@ under the License. org.apache.helix.tools.JmxDumper JmxDumper + + org.apache.helix.tools.YAMLClusterSetup + yaml-cluster-setup + diff --git a/helix-core/src/main/java/org/apache/helix/controller/stages/BestPossibleStateCalcStage.java b/helix-core/src/main/java/org/apache/helix/controller/stages/BestPossibleStateCalcStage.java index e812e16700..11955f52cb 100644 --- a/helix-core/src/main/java/org/apache/helix/controller/stages/BestPossibleStateCalcStage.java +++ b/helix-core/src/main/java/org/apache/helix/controller/stages/BestPossibleStateCalcStage.java @@ -118,9 +118,11 @@ private BestPossibleStateOutput compute(ClusterEvent event, Map newStateMap = partitionStateAssignment.getReplicaMap(partition); - output.setState(resourceName, partition, newStateMap); + if (partitionStateAssignment != null) { + for (Partition partition : resource.getPartitions()) { + Map newStateMap = partitionStateAssignment.getReplicaMap(partition); + output.setState(resourceName, partition, newStateMap); + } } } return output; diff --git a/helix-core/src/main/java/org/apache/helix/model/IdealState.java b/helix-core/src/main/java/org/apache/helix/model/IdealState.java index e14940a17e..90a2dfff93 100644 --- a/helix-core/src/main/java/org/apache/helix/model/IdealState.java +++ b/helix-core/src/main/java/org/apache/helix/model/IdealState.java @@ -461,13 +461,19 @@ public String getInstanceGroupTag() { return _record.getSimpleField(IdealStateProperty.INSTANCE_GROUP_TAG.toString()); } + 
/** + * Update the ideal state mapping from a ResourceAssignment + * @param assignment ResourceAssignment result from the rebalancer + */ public void updateFromAssignment(ResourceAssignment assignment) { _record.getMapFields().clear(); _record.getListFields().clear(); - for (Partition partition : assignment.getMappedPartitions()) { - Map replicaMap = assignment.getReplicaMap(partition); - setInstanceStateMap(partition.getPartitionName(), replicaMap); - setPreferenceList(partition.getPartitionName(), new ArrayList(replicaMap.keySet())); + if (assignment != null) { + for (Partition partition : assignment.getMappedPartitions()) { + Map replicaMap = assignment.getReplicaMap(partition); + setInstanceStateMap(partition.getPartitionName(), replicaMap); + setPreferenceList(partition.getPartitionName(), new ArrayList(replicaMap.keySet())); + } } } diff --git a/helix-core/src/main/java/org/apache/helix/tools/YAMLClusterSetup.java b/helix-core/src/main/java/org/apache/helix/tools/YAMLClusterSetup.java new file mode 100644 index 0000000000..c7233edf71 --- /dev/null +++ b/helix-core/src/main/java/org/apache/helix/tools/YAMLClusterSetup.java @@ -0,0 +1,287 @@ +package org.apache.helix.tools; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.InputStream; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.helix.HelixAdmin; +import org.apache.helix.HelixException; +import org.apache.helix.manager.zk.ZKHelixAdmin; +import org.apache.helix.model.IdealState; +import org.apache.helix.model.IdealState.RebalanceMode; +import org.apache.helix.model.InstanceConfig; +import org.apache.helix.model.StateModelDefinition; +import org.apache.helix.tools.YAMLClusterSetup.YAMLClusterConfig.ParticipantConfig; +import org.apache.helix.tools.YAMLClusterSetup.YAMLClusterConfig.ResourceConfig; +import 
org.apache.helix.tools.YAMLClusterSetup.YAMLClusterConfig.ResourceConfig.ConstraintsConfig; +import org.apache.helix.tools.YAMLClusterSetup.YAMLClusterConfig.ResourceConfig.StateModelConfig; +import org.apache.log4j.Logger; +import org.yaml.snakeyaml.Yaml; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/** + * Supports HelixAdmin operations specified by a YAML configuration file defining a cluster, + * resources, participants, etc. + * See the user-rebalanced-lock-manager recipe for an annotated example file. + */ +public class YAMLClusterSetup { + private static final Logger LOG = Logger.getLogger(YAMLClusterSetup.class); + + private final String _zkAddress; + + /** + * Start the YAML parser for a given zookeeper instance + * @param zkAddress + */ + public YAMLClusterSetup(String zkAddress) { + _zkAddress = zkAddress; + } + + /** + * Set up the cluster by parsing a YAML file. 
+ * @param input InputStream representing the file + * @return ClusterConfig Java wrapper of the configuration file + */ + public YAMLClusterConfig setupCluster(InputStream input) { + // parse the YAML + Yaml yaml = new Yaml(); + YAMLClusterConfig cfg = yaml.loadAs(input, YAMLClusterConfig.class); + + // create the cluster + HelixAdmin helixAdmin = new ZKHelixAdmin(_zkAddress); + if (cfg.clusterName == null) { + throw new HelixException("Cluster name is required!"); + } + helixAdmin.addCluster(cfg.clusterName); + + // add each participant + if (cfg.participants != null) { + for (ParticipantConfig participant : cfg.participants) { + helixAdmin.addInstance(cfg.clusterName, getInstanceCfg(participant)); + } + } + + // add each resource + if (cfg.resources != null) { + for (ResourceConfig resource : cfg.resources) { + if (resource.name == null) { + throw new HelixException("Resources must be named!"); + } + if (resource.stateModel == null || resource.stateModel.name == null) { + throw new HelixException("Resource must specify a named state model!"); + } + // if states is null, assume using a built-in or already-added state model + if (resource.stateModel.states != null) { + StateModelDefinition stateModelDef = + getStateModelDef(resource.stateModel, resource.constraints); + helixAdmin.addStateModelDef(cfg.clusterName, resource.stateModel.name, stateModelDef); + } + int partitions = 1; + int replicas = 1; + if (resource.partitions != null) { + if (resource.partitions.containsKey("count")) { + partitions = resource.partitions.get("count"); + } + if (resource.partitions.containsKey("replicas")) { + replicas = resource.partitions.get("replicas"); + } + } + + if (resource.rebalancer == null || !resource.rebalancer.containsKey("mode")) { + throw new HelixException("Rebalance mode is required!"); + } + helixAdmin.addResource(cfg.clusterName, resource.name, partitions, + resource.stateModel.name, resource.rebalancer.get("mode")); + // user-defined rebalancer + if 
(resource.rebalancer.containsKey("class") + && resource.rebalancer.get("mode").equals(RebalanceMode.USER_DEFINED.toString())) { + IdealState idealState = helixAdmin.getResourceIdealState(cfg.clusterName, resource.name); + idealState.setRebalancerClassName(resource.rebalancer.get("class")); + helixAdmin.setResourceIdealState(cfg.clusterName, resource.name, idealState); + } + helixAdmin.rebalance(cfg.clusterName, resource.name, replicas); + } + } + return cfg; + } + + private static InstanceConfig getInstanceCfg(ParticipantConfig participant) { + if (participant == null || participant.name == null || participant.host == null + || participant.port == null) { + throw new HelixException("Participant must have a specified name, host, and port!"); + } + InstanceConfig instanceCfg = new InstanceConfig(participant.name); + instanceCfg.setHostName(participant.host); + instanceCfg.setPort(participant.port.toString()); + return instanceCfg; + } + + private static StateModelDefinition getStateModelDef(StateModelConfig stateModel, + ConstraintsConfig constraints) { + // Use a builder to define the state model + StateModelDefinition.Builder builder = new StateModelDefinition.Builder(stateModel.name); + if (stateModel.states == null || stateModel.states.size() == 0) { + throw new HelixException("List of states are required in a state model!"); + } + Set stateSet = new HashSet(stateModel.states); + if (stateModel.initialState == null) { + throw new HelixException("Initial state is required in a state model!"); + } else if (!stateSet.contains(stateModel.initialState)) { + throw new HelixException("Initial state is not a valid state"); + } + builder.initialState(stateModel.initialState); + + // Build a helper for state priorities + Map statePriorities = new HashMap(); + if (constraints != null && constraints.state != null && constraints.state.priorityList != null) { + int statePriority = 0; + for (String state : constraints.state.priorityList) { + if (!stateSet.contains(state)) { + 
throw new HelixException("State " + state + + " in the state priority list is not in the state list!"); + } + statePriorities.put(state, statePriority); + statePriority++; + } + } + + // Add states, set state priorities + for (String state : stateModel.states) { + if (statePriorities.containsKey(state)) { + builder.addState(state, statePriorities.get(state)); + } else { + builder.addState(state); + } + } + + // Set state counts + for (Map counts : constraints.state.counts) { + String state = counts.get("name"); + if (!stateSet.contains(state)) { + throw new HelixException("State " + state + " has a count, but not in the state list!"); + } + builder.dynamicUpperBound(state, counts.get("count")); + } + + // Build a helper for transition priorities + Map transitionPriorities = new HashMap(); + if (constraints != null && constraints.transition != null + && constraints.transition.priorityList != null) { + int transitionPriority = 0; + for (String transition : constraints.transition.priorityList) { + transitionPriorities.put(transition, transitionPriority); + transitionPriority++; + } + } + + // Add the transitions + if (stateModel.transitions == null || stateModel.transitions.size() == 0) { + throw new HelixException("Transitions are required!"); + } + for (Map transitions : stateModel.transitions) { + String name = transitions.get("name"); + String from = transitions.get("from"); + String to = transitions.get("to"); + if (name == null || from == null || to == null) { + throw new HelixException("All transitions must have a name, a from state, and a to state"); + } + if (transitionPriorities.containsKey(name)) { + builder.addTransition(from, to, transitionPriorities.get(name)); + } else { + builder.addTransition(from, to); + } + } + + return builder.build(); + } + + /** + * Java wrapper for the YAML input file + */ + public static class YAMLClusterConfig { + public String clusterName; + public List resources; + public List participants; + + public static class 
ResourceConfig { + public String name; + public Map rebalancer; + public Map partitions; + public StateModelConfig stateModel; + public ConstraintsConfig constraints; + + public static class StateModelConfig { + public String name; + public List states; + public List> transitions; + public String initialState; + } + + public static class ConstraintsConfig { + public StateConstraintsConfig state; + public TransitionConstraintsConfig transition; + + public static class StateConstraintsConfig { + public List> counts; + public List priorityList; + } + + public static class TransitionConstraintsConfig { + public List priorityList; + } + } + } + + public static class ParticipantConfig { + public String name; + public String host; + public Integer port; + } + } + + /** + * Start a cluster defined by a YAML file + * @param args zkAddr, yamlFile + */ + public static void main(String[] args) { + if (args.length < 2) { + LOG.error("USAGE: YAMLClusterSetup zkAddr yamlFile"); + return; + } + String zkAddress = args[0]; + String yamlFile = args[1]; + + InputStream input; + try { + input = new FileInputStream(new File(yamlFile)); + } catch (FileNotFoundException e) { + LOG.error("Could not open " + yamlFile); + return; + } + new YAMLClusterSetup(zkAddress).setupCluster(input); + } +} diff --git a/pom.xml b/pom.xml index ee6f5733be..68404106a0 100644 --- a/pom.xml +++ b/pom.xml @@ -164,6 +164,11 @@ under the License. false + + Sonatype-public + SnakeYAML repository + http://oss.sonatype.org/content/groups/public/ + @@ -285,6 +290,11 @@ under the License. testng 6.0.1 + + org.yaml + snakeyaml + 1.12 + diff --git a/recipes/pom.xml b/recipes/pom.xml index d0a93b1054..36676505b9 100644 --- a/recipes/pom.xml +++ b/recipes/pom.xml @@ -33,6 +33,7 @@ under the License. 
rabbitmq-consumer-group + rsync-replicated-file-system + distributed-lock-manager + user-defined-rebalancer + task-execution + service-discovery + diff --git a/recipes/user-defined-rebalancer/README.md b/recipes/user-defined-rebalancer/README.md new file mode 100644 index 0000000000..3dca51c053 --- /dev/null +++ b/recipes/user-defined-rebalancer/README.md @@ -0,0 +1,254 @@ + +Distributed lock manager with a user-defined rebalancer and YAML configuration +------------------------------------------------------------------------------ +This recipe is a second take on the distributed lock manager example with two key differences + * Instead of specifying the cluster using the HelixAdmin Java API, a YAML file indicates the cluster, its resources, and its participants. This is a simplified way to bootstrap cluster creation with a compact, logical hierarchy. + * The rebalancing process (i.e. the algorithm that uses the cluster state to determine an assignment of locks to participants) is specified in a class defined by the recipe itself, completely independent of Helix. + +For additional background and motivation, see the distributed-lock-manager recipe. + +### YAML Cluster Setup +The YAML configuration below specifies a state model for a lock in which it can be locked and unlocked. At most one participant can hold the lock at any time, and there are 12 locks to distribute across 3 participants. 
+ +``` +clusterName: lock-manager-custom-rebalancer # unique name for the cluster +resources: + - name: lock-group # unique resource name + rebalancer: # we will provide our own rebalancer + mode: USER_DEFINED + class: org.apache.helix.userdefinedrebalancer.LockManagerRebalancer + partitions: + count: 12 # number of locks + replicas: 1 # number of simultaneous holders for each lock + stateModel: + name: lock-unlock # unique model name + states: [LOCKED, RELEASED, DROPPED] # the list of possible states + transitions: # the list of possible transitions + - name: Unlock + from: LOCKED + to: RELEASED + - name: Lock + from: RELEASED + to: LOCKED + - name: DropLock + from: LOCKED + to: DROPPED + - name: DropUnlock + from: RELEASED + to: DROPPED + - name: Undrop + from: DROPPED + to: RELEASED + initialState: RELEASED + constraints: + state: + counts: # maximum number of replicas of a partition that can be in each state + - name: LOCKED + count: "1" + - name: RELEASED + count: "-1" + - name: DROPPED + count: "-1" + priorityList: [LOCKED, RELEASED, DROPPED] # states in order of priority + transition: # transitions priority to enforce order that transitions occur + priorityList: [Unlock, Lock, Undrop, DropUnlock, DropLock] +participants: # list of nodes that can acquire locks + - name: localhost_12001 + host: localhost + port: 12001 + - name: localhost_12002 + host: localhost + port: 12002 + - name: localhost_12003 + host: localhost + port: 12003 +``` + +### User-Defined Rebalancer +The implementation of the Rebalancer interface is quite simple. It assumes a Lock/Unlock model where the lock state has highest priority. It uses a mod-based approach to fairly assign locks to participants so that no participant holds more than one instance of a lock, and each lock is only assigned to as many participants as can hold the same lock simultaneously. In the configuration above, only one participant can hold a given lock in the locked state. 
+ +The result is a ResourceAssignment, which maps each lock to its holder and its lock state. In Helix terminology, the lock manager is the resource, a lock is a partition, its holder is a participant, and the lock state is the current state of the lock based on one of the pre-defined states in the state model. + +``` +@Override +public ResourceAssignment computeResourceMapping(Resource resource, IdealState currentIdealState, + CurrentStateOutput currentStateOutput, ClusterDataCache clusterData) { + // Initialize an empty mapping of locks to participants + ResourceAssignment assignment = new ResourceAssignment(resource.getResourceName()); + + // Get the list of live participants in the cluster + List liveParticipants = new ArrayList(clusterData.getLiveInstances().keySet()); + + // Get the state model (should be a simple lock/unlock model) and the highest-priority state + String stateModelName = currentIdealState.getStateModelDefRef(); + StateModelDefinition stateModelDef = clusterData.getStateModelDef(stateModelName); + if (stateModelDef.getStatesPriorityList().size() < 1) { + LOG.error("Invalid state model definition. There should be at least one state."); + return assignment; + } + String lockState = stateModelDef.getStatesPriorityList().get(0); + + // Count the number of participants allowed to lock each lock + String stateCount = stateModelDef.getNumInstancesPerState(lockState); + int lockHolders = 0; + try { + // a numeric value is a custom-specified number of participants allowed to lock the lock + lockHolders = Integer.parseInt(stateCount); + } catch (NumberFormatException e) { + LOG.error("Invalid state model definition. The lock state does not have a valid count"); + return assignment; + } + + // Fairly assign the lock state to the participants using a simple mod-based sequential + // assignment. 
For instance, if each lock can be held by 3 participants, lock 0 would be held + // by participants (0, 1, 2), lock 1 would be held by (1, 2, 3), and so on, wrapping around the + // number of participants as necessary. + // This assumes a simple lock-unlock model where the only state of interest is which nodes have + // acquired each lock. + int i = 0; + for (Partition partition : resource.getPartitions()) { + Map replicaMap = new HashMap(); + for (int j = i; j < i + lockHolders; j++) { + int participantIndex = j % liveParticipants.size(); + String participant = liveParticipants.get(participantIndex); + // enforce that a participant can only have one instance of a given lock + if (!replicaMap.containsKey(participant)) { + replicaMap.put(participant, lockState); + } + } + assignment.addReplicaMap(partition, replicaMap); + i++; + } + return assignment; +} +``` +---------------------------------------------------------------------------------------- + +#### In Action + +##### Specifying a Lock StateModel +In our configuration file, we indicated a special state model with two key states: LOCKED and RELEASED. Thus, we need to provide for the participant a subclass of StateModel that can respond to transitions between those states. + +``` +public class Lock extends StateModel { + private String lockName; + + public Lock(String lockName) { + this.lockName = lockName; + } + + @Transition(from = "RELEASED", to = "LOCKED") + public void lock(Message m, NotificationContext context) { + System.out.println(context.getManager().getInstanceName() + " acquired lock:" + lockName); + } + + @Transition(from = "LOCKED", to = "RELEASED") + public void release(Message m, NotificationContext context) { + System.out.println(context.getManager().getInstanceName() + " releasing lock:" + lockName); + } +} +``` + +##### Loading the configuration file +We include a YAML file parser that will set up the cluster according to the specifications of the file. 
Here is the code that this example uses to set up the cluster: + +``` +YAMLClusterSetup setup = new YAMLClusterSetup(zkAddress); +InputStream input = + Thread.currentThread().getContextClassLoader() + .getResourceAsStream("lock-manager-config.yaml"); +YAMLClusterSetup.YAMLClusterConfig config = setup.setupCluster(input); +``` +At this point, the cluster is set up and the configuration is persisted on Zookeeper. The config variable contains a snapshot of this configuration for further access. + +##### Building +``` +git clone https://git-wip-us.apache.org/repos/asf/incubator-helix.git +cd incubator-helix +mvn clean install package -DskipTests +cd recipes/user-defined-rebalancer/target/user-defined-rebalancer-pkg/bin +chmod +x * +./lock-manager-demo +``` + +##### Output + +``` +./lock-manager-demo +STARTING localhost_12002 +STARTING localhost_12001 +STARTING localhost_12003 +STARTED localhost_12001 +STARTED localhost_12003 +STARTED localhost_12002 +localhost_12003 acquired lock:lock-group_4 +localhost_12002 acquired lock:lock-group_8 +localhost_12001 acquired lock:lock-group_10 +localhost_12001 acquired lock:lock-group_3 +localhost_12001 acquired lock:lock-group_6 +localhost_12003 acquired lock:lock-group_0 +localhost_12002 acquired lock:lock-group_5 +localhost_12001 acquired lock:lock-group_9 +localhost_12002 acquired lock:lock-group_2 +localhost_12003 acquired lock:lock-group_7 +localhost_12003 acquired lock:lock-group_11 +localhost_12002 acquired lock:lock-group_1 +lockName acquired By +====================================== +lock-group_0 localhost_12003 +lock-group_1 localhost_12002 +lock-group_10 localhost_12001 +lock-group_11 localhost_12003 +lock-group_2 localhost_12002 +lock-group_3 localhost_12001 +lock-group_4 localhost_12003 +lock-group_5 localhost_12002 +lock-group_6 localhost_12001 +lock-group_7 localhost_12003 +lock-group_8 localhost_12002 +lock-group_9 localhost_12001 +Stopping the first participant +localhost_12001 Interrupted 
+localhost_12002 acquired lock:lock-group_3 +localhost_12003 acquired lock:lock-group_6 +localhost_12003 acquired lock:lock-group_10 +localhost_12002 acquired lock:lock-group_9 +lockName acquired By +====================================== +lock-group_0 localhost_12003 +lock-group_1 localhost_12002 +lock-group_10 localhost_12003 +lock-group_11 localhost_12003 +lock-group_2 localhost_12002 +lock-group_3 localhost_12002 +lock-group_4 localhost_12003 +lock-group_5 localhost_12002 +lock-group_6 localhost_12003 +lock-group_7 localhost_12003 +lock-group_8 localhost_12002 +lock-group_9 localhost_12002 +``` + +---------------------------------------------------------------------------------------- + + + + + diff --git a/recipes/user-defined-rebalancer/pom.xml b/recipes/user-defined-rebalancer/pom.xml new file mode 100644 index 0000000000..ebd972c0be --- /dev/null +++ b/recipes/user-defined-rebalancer/pom.xml @@ -0,0 +1,139 @@ + + + + 4.0.0 + + + org.apache.helix.recipes + recipes + 0.6.2-incubating-SNAPSHOT + + + user-defined-rebalancer + bundle + Apache Helix :: Recipes :: user-defined-rebalancer + + + + org.apache.helix*, + org.apache.log4j, + * + + org.apache.helix.userdefinedrebalancer*;version="${project.version};-noimport:=true + + + + + org.testng + testng + 6.0.1 + + + org.apache.helix + helix-core + + + log4j + log4j + + + javax.mail + mail + + + javax.jms + jms + + + com.sun.jdmk + jmxtools + + + com.sun.jmx + jmxri + + + + + + + + ${basedir}/src/main/resources + true + + + + + + org.codehaus.mojo + appassembler-maven-plugin + + + + + + + true + ${project.build.directory}/${project.artifactId}-pkg + + -Xms512m -Xmx512m + + + windows + unix + + + + + package + + assemble + + + + + + org.apache.rat + apache-rat-plugin + + + + + + + + + + org.codehaus.mojo + appassembler-maven-plugin + + + + org.apache.helix.userdefinedrebalancer.LockManagerDemo + lock-manager-demo + + + + + + + diff --git a/recipes/user-defined-rebalancer/src/main/config/log4j.properties 
b/recipes/user-defined-rebalancer/src/main/config/log4j.properties new file mode 100644 index 0000000000..4b3dc31577 --- /dev/null +++ b/recipes/user-defined-rebalancer/src/main/config/log4j.properties @@ -0,0 +1,31 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +# Set root logger level to DEBUG and its only appender to A1. +log4j.rootLogger=ERROR,A1 + +# A1 is set to be a ConsoleAppender. +log4j.appender.A1=org.apache.log4j.ConsoleAppender + +# A1 uses PatternLayout. +log4j.appender.A1.layout=org.apache.log4j.PatternLayout +log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n + +log4j.logger.org.I0Itec=ERROR +log4j.logger.org.apache=ERROR diff --git a/recipes/user-defined-rebalancer/src/main/java/org/apache/helix/userdefinedrebalancer/Lock.java b/recipes/user-defined-rebalancer/src/main/java/org/apache/helix/userdefinedrebalancer/Lock.java new file mode 100644 index 0000000000..ceba1edcf5 --- /dev/null +++ b/recipes/user-defined-rebalancer/src/main/java/org/apache/helix/userdefinedrebalancer/Lock.java @@ -0,0 +1,48 @@ +package org.apache.helix.userdefinedrebalancer; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import org.apache.helix.NotificationContext; +import org.apache.helix.model.Message; +import org.apache.helix.participant.statemachine.StateModel; +import org.apache.helix.participant.statemachine.StateModelInfo; +import org.apache.helix.participant.statemachine.Transition; + +@StateModelInfo(initialState = "RELEASED", states = { + "RELEASED", "LOCKED" +}) +public class Lock extends StateModel { + private String lockName; + + public Lock(String lockName) { + this.lockName = lockName; + } + + @Transition(from = "RELEASED", to = "LOCKED") + public void lock(Message m, NotificationContext context) { + System.out.println(context.getManager().getInstanceName() + " acquired lock:" + lockName); + } + + @Transition(from = "LOCKED", to = "RELEASED") + public void release(Message m, NotificationContext context) { + System.out.println(context.getManager().getInstanceName() + " releasing lock:" + lockName); + } + +} diff --git a/recipes/user-defined-rebalancer/src/main/java/org/apache/helix/userdefinedrebalancer/LockFactory.java b/recipes/user-defined-rebalancer/src/main/java/org/apache/helix/userdefinedrebalancer/LockFactory.java new file mode 100644 index 0000000000..3aec20c3d6 --- /dev/null +++ 
b/recipes/user-defined-rebalancer/src/main/java/org/apache/helix/userdefinedrebalancer/LockFactory.java @@ -0,0 +1,34 @@ +package org.apache.helix.userdefinedrebalancer; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import org.apache.helix.participant.statemachine.StateModelFactory; + +/** + * This factory allows a participant to get the appropriate state model callbacks for the lock + * manager state model. This is used exactly once per participant to get a valid instance of a Lock, + * and then the same Lock instance is used for all state transition callbacks. 
+ */ +public class LockFactory extends StateModelFactory { + @Override + public Lock createNewStateModel(String lockName) { + return new Lock(lockName); + } +} diff --git a/recipes/user-defined-rebalancer/src/main/java/org/apache/helix/userdefinedrebalancer/LockManagerDemo.java b/recipes/user-defined-rebalancer/src/main/java/org/apache/helix/userdefinedrebalancer/LockManagerDemo.java new file mode 100644 index 0000000000..727c5b7ce8 --- /dev/null +++ b/recipes/user-defined-rebalancer/src/main/java/org/apache/helix/userdefinedrebalancer/LockManagerDemo.java @@ -0,0 +1,192 @@ +package org.apache.helix.userdefinedrebalancer; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +import java.io.File; +import java.io.InputStream; +import java.util.Map; +import java.util.TreeSet; + +import org.I0Itec.zkclient.IDefaultNameSpace; +import org.I0Itec.zkclient.ZkClient; +import org.I0Itec.zkclient.ZkServer; +import org.apache.commons.io.FileUtils; +import org.apache.helix.HelixAdmin; +import org.apache.helix.HelixManager; +import org.apache.helix.controller.HelixControllerMain; +import org.apache.helix.manager.zk.ZKHelixAdmin; +import org.apache.helix.model.ExternalView; +import org.apache.helix.tools.YAMLClusterSetup; +import org.apache.log4j.Logger; + +public class LockManagerDemo { + private static final Logger LOG = Logger.getLogger(LockManagerDemo.class); + + /** + * LockManagerDemo clusterName, numInstances, lockGroupName, numLocks + * @param args + * @throws Exception + */ + public static void main(String[] args) throws Exception { + final String zkAddress = "localhost:2199"; + + // default participant parameters in case the config does not specify them + int numInstances = 3; + boolean instancesSpecified = false; + Thread[] processArray = new Thread[numInstances]; + + // HelixManager for setting up the controller + HelixManager controllerManager = null; + + // Name of the lock group resource (specified by the config file) + String lockGroupName = null; + try { + startLocalZookeeper(2199); + YAMLClusterSetup setup = new YAMLClusterSetup(zkAddress); + InputStream input = + Thread.currentThread().getContextClassLoader() + .getResourceAsStream("lock-manager-config.yaml"); + final YAMLClusterSetup.YAMLClusterConfig config = setup.setupCluster(input); + if (config == null) { + LOG.error("Invalid YAML configuration"); + return; + } + if (config.resources == null || config.resources.isEmpty()) { + LOG.error("Need to specify a resource!"); + return; + } + + // save resource name + lockGroupName = config.resources.get(0).name; + + // save participants if specified + if (config.participants != null && config.participants.size() > 0) { + 
numInstances = config.participants.size(); + instancesSpecified = true; + processArray = new Thread[numInstances]; + } + + // run each participant + for (int i = 0; i < numInstances; i++) { + String participantName; + if (instancesSpecified) { + participantName = config.participants.get(i).name; + } else { + participantName = "localhost_" + (12000 + i); + } + final String instanceName = participantName; + processArray[i] = new Thread(new Runnable() { + + @Override + public void run() { + LockProcess lockProcess = null; + + try { + lockProcess = + new LockProcess(config.clusterName, zkAddress, instanceName, + config.resources.get(0).stateModel.name); + lockProcess.start(); + Thread.currentThread().join(); + } catch (InterruptedException e) { + System.out.println(instanceName + " Interrupted"); + if (lockProcess != null) { + lockProcess.stop(); + } + } catch (Exception e) { + e.printStackTrace(); + } + } + + }); + processArray[i].start(); + } + Thread.sleep(3000); + + // start the controller + controllerManager = + HelixControllerMain.startHelixController(zkAddress, config.clusterName, "controller", + HelixControllerMain.STANDALONE); + Thread.sleep(5000); + + // HelixAdmin for querying cluster state + HelixAdmin admin = new ZKHelixAdmin(zkAddress); + + printStatus(admin, config.clusterName, lockGroupName); + + // stop one participant + System.out.println("Stopping the first participant"); + processArray[0].interrupt(); + Thread.sleep(3000); + printStatus(admin, config.clusterName, lockGroupName); + Thread.currentThread().join(); + } catch (Exception e) { + e.printStackTrace(); + } finally { + if (controllerManager != null) { + controllerManager.disconnect(); + } + for (Thread process : processArray) { + if (process != null) { + process.interrupt(); + } + } + } + } + + private static void printStatus(HelixAdmin admin, String cluster, String resource) { + ExternalView externalView = admin.getResourceExternalView(cluster, resource); + TreeSet treeSet = new 
TreeSet(externalView.getPartitionSet()); + System.out.println("lockName" + "\t" + "acquired By"); + System.out.println("======================================"); + for (String lockName : treeSet) { + Map stateMap = externalView.getStateMap(lockName); + String acquiredBy = null; + if (stateMap != null) { + for (String instanceName : stateMap.keySet()) { + if ("LOCKED".equals(stateMap.get(instanceName))) { + acquiredBy = instanceName; + break; + } + } + } + System.out.println(lockName + "\t" + ((acquiredBy != null) ? acquiredBy : "NONE")); + } + } + + private static void startLocalZookeeper(int port) throws Exception { + ZkServer server = null; + String baseDir = "/tmp/IntegrationTest/"; + final String dataDir = baseDir + "zk/dataDir"; + final String logDir = baseDir + "/tmp/logDir"; + FileUtils.deleteDirectory(new File(dataDir)); + FileUtils.deleteDirectory(new File(logDir)); + + IDefaultNameSpace defaultNameSpace = new IDefaultNameSpace() { + @Override + public void createDefaultNameSpace(ZkClient zkClient) { + + } + }; + int zkPort = 2199; + server = new ZkServer(dataDir, logDir, defaultNameSpace, zkPort); + server.start(); + } + +} diff --git a/recipes/user-defined-rebalancer/src/main/java/org/apache/helix/userdefinedrebalancer/LockManagerRebalancer.java b/recipes/user-defined-rebalancer/src/main/java/org/apache/helix/userdefinedrebalancer/LockManagerRebalancer.java new file mode 100644 index 0000000000..e65113c5f2 --- /dev/null +++ b/recipes/user-defined-rebalancer/src/main/java/org/apache/helix/userdefinedrebalancer/LockManagerRebalancer.java @@ -0,0 +1,84 @@ +package org.apache.helix.userdefinedrebalancer; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.helix.HelixManager; +import org.apache.helix.controller.rebalancer.Rebalancer; +import org.apache.helix.controller.stages.ClusterDataCache; +import org.apache.helix.controller.stages.CurrentStateOutput; +import 
org.apache.helix.model.IdealState; +import org.apache.helix.model.Partition; +import org.apache.helix.model.Resource; +import org.apache.helix.model.ResourceAssignment; +import org.apache.helix.model.StateModelDefinition; +import org.apache.log4j.Logger; + +public class LockManagerRebalancer implements Rebalancer { + private static final Logger LOG = Logger.getLogger(LockManagerRebalancer.class); + + @Override + public void init(HelixManager manager) { + // do nothing; this rebalancer is independent of the manager + } + + /** + * This rebalancer is invoked whenever there is a change in the cluster, including when new + * participants join or leave, or the configuration of any participant changes. It is written + * specifically to handle assignment of locks to nodes under the very simple lock-unlock state + * model. + */ + @Override + public ResourceAssignment computeResourceMapping(Resource resource, IdealState currentIdealState, + CurrentStateOutput currentStateOutput, ClusterDataCache clusterData) { + // Initialize an empty mapping of locks to participants + ResourceAssignment assignment = new ResourceAssignment(resource.getResourceName()); + + // Get the list of live participants in the cluster + List liveParticipants = new ArrayList(clusterData.getLiveInstances().keySet()); + + // Get the state model (should be a simple lock/unlock model) and the highest-priority state + String stateModelName = currentIdealState.getStateModelDefRef(); + StateModelDefinition stateModelDef = clusterData.getStateModelDef(stateModelName); + if (stateModelDef.getStatesPriorityList().size() < 1) { + LOG.error("Invalid state model definition. 
There should be at least one state."); + return assignment; + } + String lockState = stateModelDef.getStatesPriorityList().get(0); + + // Count the number of participants allowed to lock each lock + String stateCount = stateModelDef.getNumInstancesPerState(lockState); + int lockHolders = 0; + try { + // a numeric value is a custom-specified number of participants allowed to lock the lock + lockHolders = Integer.parseInt(stateCount); + } catch (NumberFormatException e) { + LOG.error("Invalid state model definition. The lock state does not have a valid count"); + return assignment; + } + + // Fairly assign the lock state to the participants using a simple mod-based sequential + // assignment. For instance, if each lock can be held by 3 participants, lock 0 would be held + // by participants (0, 1, 2), lock 1 would be held by (1, 2, 3), and so on, wrapping around the + // number of participants as necessary. + // This assumes a simple lock-unlock model where the only state of interest is which nodes have + // acquired each lock. 
+ int i = 0; + for (Partition partition : resource.getPartitions()) { + Map replicaMap = new HashMap(); + for (int j = i; j < i + lockHolders; j++) { + int participantIndex = j % liveParticipants.size(); + String participant = liveParticipants.get(participantIndex); + // enforce that a participant can only have one instance of a given lock + if (!replicaMap.containsKey(participant)) { + replicaMap.put(participant, lockState); + } + } + assignment.addReplicaMap(partition, replicaMap); + i++; + } + return assignment; + } +} diff --git a/recipes/user-defined-rebalancer/src/main/java/org/apache/helix/userdefinedrebalancer/LockProcess.java b/recipes/user-defined-rebalancer/src/main/java/org/apache/helix/userdefinedrebalancer/LockProcess.java new file mode 100644 index 0000000000..ee363b5667 --- /dev/null +++ b/recipes/user-defined-rebalancer/src/main/java/org/apache/helix/userdefinedrebalancer/LockProcess.java @@ -0,0 +1,79 @@ +package org.apache.helix.userdefinedrebalancer; + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +import java.util.List; + +import org.apache.helix.HelixManager; +import org.apache.helix.HelixManagerFactory; +import org.apache.helix.InstanceType; +import org.apache.helix.manager.zk.ZKHelixAdmin; +import org.apache.helix.model.InstanceConfig; + +public class LockProcess { + private final String clusterName; + private final String zkAddress; + private final String instanceName; + private final String stateModelName; + private HelixManager participantManager; + + LockProcess(String clusterName, String zkAddress, String instanceName, String stateModelName) { + this.clusterName = clusterName; + this.zkAddress = zkAddress; + this.instanceName = instanceName; + this.stateModelName = stateModelName; + + } + + public void start() throws Exception { + System.out.println("STARTING " + instanceName); + configureInstance(instanceName); + participantManager = + HelixManagerFactory.getZKHelixManager(clusterName, instanceName, InstanceType.PARTICIPANT, + zkAddress); + participantManager.getStateMachineEngine().registerStateModelFactory(stateModelName, + new LockFactory()); + participantManager.connect(); + System.out.println("STARTED " + instanceName); + } + + /** + * Configure the instance, the configuration of each node is available to + * other nodes. 
+ * @param instanceName + */ + private void configureInstance(String instanceName) { + ZKHelixAdmin helixAdmin = new ZKHelixAdmin(zkAddress); + + List instancesInCluster = helixAdmin.getInstancesInCluster(clusterName); + if (instancesInCluster == null || !instancesInCluster.contains(instanceName)) { + InstanceConfig config = new InstanceConfig(instanceName); + config.setHostName("localhost"); + config.setPort("12000"); + helixAdmin.addInstance(clusterName, config); + } + } + + public void stop() { + if (participantManager != null) { + participantManager.disconnect(); + } + } +} diff --git a/recipes/user-defined-rebalancer/src/main/resources/lock-manager-config.yaml b/recipes/user-defined-rebalancer/src/main/resources/lock-manager-config.yaml new file mode 100644 index 0000000000..b3128779c6 --- /dev/null +++ b/recipes/user-defined-rebalancer/src/main/resources/lock-manager-config.yaml @@ -0,0 +1,69 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# +clusterName: lock-manager-custom-rebalancer # unique name for the cluster +resources: + - name: lock-group # unique resource name + rebalancer: # we will provide our own rebalancer + mode: USER_DEFINED + class: org.apache.helix.userdefinedrebalancer.LockManagerRebalancer + partitions: + count: 12 # number of locks + replicas: 1 # number of simultaneous holders for each lock + stateModel: + name: lock-unlock # unique model name + states: [LOCKED, RELEASED, DROPPED] # the list of possible states + transitions: # the list of possible transitions + - name: Unlock + from: LOCKED + to: RELEASED + - name: Lock + from: RELEASED + to: LOCKED + - name: DropLock + from: LOCKED + to: DROPPED + - name: DropUnlock + from: RELEASED + to: DROPPED + - name: Undrop + from: DROPPED + to: RELEASED + initialState: RELEASED + constraints: + state: + counts: # maximum number of replicas of a partition that can be in each state + - name: LOCKED + count: "1" + - name: RELEASED + count: "-1" + - name: DROPPED + count: "-1" + priorityList: [LOCKED, RELEASED, DROPPED] # states in order of priority + transition: # transitions priority to enforce order that transitions occur + priorityList: [Unlock, Lock, Undrop, DropUnlock, DropLock] +participants: # list of nodes that can acquire locks + - name: localhost_12001 + host: localhost + port: 12001 + - name: localhost_12002 + host: localhost + port: 12002 + - name: localhost_12003 + host: localhost + port: 12003 \ No newline at end of file diff --git a/recipes/user-defined-rebalancer/src/test/conf/testng.xml b/recipes/user-defined-rebalancer/src/test/conf/testng.xml new file mode 100644 index 0000000000..58f0803678 --- /dev/null +++ b/recipes/user-defined-rebalancer/src/test/conf/testng.xml @@ -0,0 +1,27 @@ + + + + + + + + + + From 84fb26bd8a7ecc34cbea6951451c1d821279e011 Mon Sep 17 00:00:00 2001 From: zzhang Date: Tue, 3 Sep 2013 10:37:42 -0700 Subject: [PATCH 013/113] [HELIX-228] fix errors in quick start wiki page, rb=13940 --- 
src/site/markdown/Quickstart.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/site/markdown/Quickstart.md b/src/site/markdown/Quickstart.md index 574f98b4ed..1cc61e3d87 100644 --- a/src/site/markdown/Quickstart.md +++ b/src/site/markdown/Quickstart.md @@ -27,7 +27,7 @@ First, let\'s get Helix, either build it, or download. git clone https://git-wip-us.apache.org/repos/asf/incubator-helix.git cd incubator-helix git checkout tags/helix-0.6.1-incubating - mvn install package -DskipTests + ./build cd helix-core/target/helix-core-pkg/bin //This folder contains all the scripts used in following sections chmod +x * @@ -51,7 +51,7 @@ Helix provides command line interfaces to set up the cluster and view the cluste If you built the code ``` -cd helix/incubator-helix/helix-core/target/helix-core-pkg/bin +cd incubator-helix/helix-core/target/helix-core-pkg/bin ``` If you downloaded the release package, extract it. @@ -73,7 +73,7 @@ You can observe the components working together in this demo, which does the fol ##### Run the demo ``` -cd helix/incubator-helix/helix-core/target/helix-core-pkg/bin +cd incubator-helix/helix-core/target/helix-core-pkg/bin ./quickstart.sh ``` @@ -375,8 +375,8 @@ ExternalView for myDB: Now, let\'s look at one of the partitions: - ## helix-admin.sh --zkSvr --listResourceInfo - ./helix-admin.sh --zkSvr localhost:2199 --listResourceInfo mycluster myDB_0 + ## helix-admin.sh --zkSvr --listPartitionInfo + ./helix-admin.sh --zkSvr localhost:2199 --listPartitionInfo MYCLUSTER myDB myDB_0 #### Expand the Cluster From f90e0d8bb3f28d90506e36ecdae961868f4a11a4 Mon Sep 17 00:00:00 2001 From: Kanak Biscuitwala Date: Mon, 23 Sep 2013 13:15:10 -0700 Subject: [PATCH 014/113] [HELIX-250] Add Kanak as committer to website, rb=14287 --- pom.xml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 68404106a0..7f76ed24f5 100644 --- a/pom.xml +++ b/pom.xml @@ -110,7 +110,16 @@ under the 
License. Committer -8 - + + + kanak + Kanak Biscuitwala + kanak@apache.org + + Committer + + -8 + helix-core From 8a39b8a9916df10b569117a34ede5b79c0ffb6cb Mon Sep 17 00:00:00 2001 From: Kanak Biscuitwala Date: Mon, 23 Sep 2013 13:15:10 -0700 Subject: [PATCH 015/113] [HELIX-250] Add Kanak as committer to website, rb=14287 --- pom.xml | 84 ++++++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 78 insertions(+), 6 deletions(-) diff --git a/pom.xml b/pom.xml index 68404106a0..6c1cf4cc6e 100644 --- a/pom.xml +++ b/pom.xml @@ -44,7 +44,7 @@ under the License. Olivier Lamy olamy@apache.org - PMC Member + Mentor Australia/Melbourne @@ -53,16 +53,16 @@ under the License. Patrick Hunt phunt@apache.org - PMC Member + Mentor -8 - Mahadev + mahadev Mahadev Konar mahadev@apache.org - PMC Member + Mentor -8 @@ -71,7 +71,7 @@ under the License. Owen O'Malley omalley@apache.org - PMC Member + Mentor -8 @@ -102,6 +102,15 @@ under the License. -8 + + TBA + Adam Silberstein + TBA@apache.org + + Committer + + -8 + ksurlaker Kapil Surlaker @@ -110,7 +119,70 @@ under the License. 
Committer -8 - + + + rms + Bob Schulman + rms@apache.org + + Committer + + -8 + + + rahula + Rahul Aggarwal + rahula@apache.org + + Committer + + -8 + + + chtyim + Terence Yim + chtyim@apache.org + + Committer + + -8 + + + santip + Santiago Perez + santip@apache.org + + Committer + + -8 + + + vinayakb + Vinayak Borkar + vinayakb@apache.org + + Committer + + -8 + + + sdas + Shirshanka Das + sdas@apache.org + + Committer + + -8 + + + kanak + Kanak Biscuitwala + kanak@apache.org + + Committer + + -8 + helix-core From 52717e40d523d1a5cd0314277493258540ed15e6 Mon Sep 17 00:00:00 2001 From: Kanak Biscuitwala Date: Wed, 25 Sep 2013 23:30:39 -0700 Subject: [PATCH 016/113] [HELIX-250] Add Kanak as committer to website, fix bug in deploy script --- deploySite.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) mode change 100644 => 100755 deploySite.sh diff --git a/deploySite.sh b/deploySite.sh old mode 100644 new mode 100755 index 8abe7906ac..47d685f93c --- a/deploySite.sh +++ b/deploySite.sh @@ -21,4 +21,4 @@ read -s -p "Enter Apache Username: " myusername echo "" read -s -p "Enter Apache Password: " mypassword -mvn clean site-deploy scm-publish:publish-scm -Dusername=$myusername -Dpassword=$mypassword -DskipTests $@ +mvn clean site-deploy scm-publish:publish-scm -Dusername="$myusername" -Dpassword="$mypassword" -DskipTests $@ From b9fe738797cd5228e8ecaa284c8874bfa19f1ff2 Mon Sep 17 00:00:00 2001 From: zzhang Date: Fri, 4 Oct 2013 11:20:17 -0700 Subject: [PATCH 017/113] [HELIX-264] fix zkclient#close() bug, rb=14483 --- .../org/apache/helix/manager/zk/ZkClient.java | 44 ++- .../java/org/apache/helix/ZkTestHelper.java | 46 ++- .../helix/manager/zk/TestZkFlapping.java | 272 ++++++++++++++++++ 3 files changed, 349 insertions(+), 13 deletions(-) create mode 100644 helix-core/src/test/java/org/apache/helix/manager/zk/TestZkFlapping.java diff --git a/helix-core/src/main/java/org/apache/helix/manager/zk/ZkClient.java 
b/helix-core/src/main/java/org/apache/helix/manager/zk/ZkClient.java index 5b3af6d9b4..5f58f1ba46 100644 --- a/helix-core/src/main/java/org/apache/helix/manager/zk/ZkClient.java +++ b/helix-core/src/main/java/org/apache/helix/manager/zk/ZkClient.java @@ -62,8 +62,7 @@ public ZkClient(IZkConnection connection, int connectionTimeout, _zkSerializer = zkSerializer; if (LOG.isTraceEnabled()) { StackTraceElement[] calls = Thread.currentThread().getStackTrace(); - int min = Math.min(calls.length, 5); - LOG.trace("creating a zkclient. callstack: " + Arrays.asList(calls).subList(0, min)); + LOG.trace("creating a zkclient. callstack: " + Arrays.asList(calls)); } } @@ -122,10 +121,45 @@ public IZkConnection getConnection() { public void close() throws ZkInterruptedException { if (LOG.isTraceEnabled()) { StackTraceElement[] calls = Thread.currentThread().getStackTrace(); - int min = Math.min(calls.length, 5); - LOG.trace("closing a zkclient. callStack: " + Arrays.asList(calls).subList(0, min)); + LOG.trace("closing a zkclient. 
callStack: " + Arrays.asList(calls)); + } + + getEventLock().lock(); + try { + if (_connection == null) { + return; + } + + LOG.info("Closing zkclient: " + ((ZkConnection) _connection).getZookeeper()); + super.close(); + } catch (ZkInterruptedException e) { + /** + * HELIX-264: calling ZkClient#close() in its own eventThread context will + * throw ZkInterruptedException and skip ZkConnection#close() + */ + if (_connection != null) { + try { + /** + * ZkInterruptedException#construct() honors InterruptedException by calling + * Thread.currentThread().interrupt(); clear it first, so we can safely close the + * zk-connection + */ + Thread.interrupted(); + _connection.close(); + _connection = null; + + /** + * restore interrupted status of current thread + */ + Thread.currentThread().interrupt(); + } catch (InterruptedException e1) { + throw new ZkInterruptedException(e1); + } + } + } finally { + getEventLock().unlock(); + LOG.info("Closed zkclient"); } - super.close(); } public Stat getStat(final String path) { diff --git a/helix-core/src/test/java/org/apache/helix/ZkTestHelper.java b/helix-core/src/test/java/org/apache/helix/ZkTestHelper.java index db2a6d031f..a43bba16d7 100644 --- a/helix-core/src/test/java/org/apache/helix/ZkTestHelper.java +++ b/helix-core/src/test/java/org/apache/helix/ZkTestHelper.java @@ -20,6 +20,7 @@ */ import java.io.BufferedReader; +import java.io.IOException; import java.io.InputStreamReader; import java.io.PrintWriter; import java.net.Socket; @@ -43,9 +44,11 @@ import org.apache.log4j.Logger; import org.apache.zookeeper.WatchedEvent; import org.apache.zookeeper.Watcher; +import org.apache.zookeeper.Watcher.Event.EventType; import org.apache.zookeeper.Watcher.Event.KeeperState; import org.apache.zookeeper.ZooKeeper; import org.apache.zookeeper.ZooKeeper.States; +import org.testng.Assert; public class ZkTestHelper { private static Logger LOG = Logger.getLogger(ZkTestHelper.class); @@ -102,13 +105,37 @@ public void process(WatchedEvent 
event) { LOG.info("After expiry. sessionId: " + Long.toHexString(curZookeeper.getSessionId())); } + /** + * Simulate a zk state change by calling {@link ZkClient#process(WatchedEvent)} directly + */ + public static void simulateZkStateDisconnected(ZkClient client) { + WatchedEvent event = new WatchedEvent(EventType.None, KeeperState.Disconnected, null); + client.process(event); + } + + /** + * Get zk connection session id + * @param client + * @return + */ + public static String getSessionId(ZkClient client) { + ZkConnection connection = ((ZkConnection) client.getConnection()); + ZooKeeper curZookeeper = connection.getZookeeper(); + return Long.toHexString(curZookeeper.getSessionId()); + } + + /** + * Expire current zk session and wait for {@link IZkStateListener#handleNewSession()} invoked + * @param zkClient + * @throws Exception + */ public static void expireSession(final ZkClient zkClient) throws Exception { - final CountDownLatch waitExpire = new CountDownLatch(1); + final CountDownLatch waitNewSession = new CountDownLatch(1); IZkStateListener listener = new IZkStateListener() { @Override public void handleStateChanged(KeeperState state) throws Exception { - // System.err.println("handleStateChanged. state: " + state); + LOG.info("IZkStateListener#handleStateChanged, state: " + state); } @Override @@ -120,7 +147,7 @@ public void handleNewSession() throws Exception { ZooKeeper curZookeeper = connection.getZookeeper(); LOG.info("handleNewSession. sessionId: " + Long.toHexString(curZookeeper.getSessionId())); - waitExpire.countDown(); + waitNewSession.countDown(); } }; @@ -128,12 +155,13 @@ public void handleNewSession() throws Exception { ZkConnection connection = ((ZkConnection) zkClient.getConnection()); ZooKeeper curZookeeper = connection.getZookeeper(); - LOG.info("Before expiry. sessionId: " + Long.toHexString(curZookeeper.getSessionId())); + String oldSessionId = Long.toHexString(curZookeeper.getSessionId()); + LOG.info("Before session expiry. 
sessionId: " + oldSessionId + ", zk: " + curZookeeper); Watcher watcher = new Watcher() { @Override public void process(WatchedEvent event) { - LOG.info("Process watchEvent: " + event); + LOG.info("Watcher#process, event: " + event); } }; @@ -144,17 +172,19 @@ public void process(WatchedEvent event) { while (dupZookeeper.getState() != States.CONNECTED) { Thread.sleep(10); } + Assert.assertEquals(dupZookeeper.getState(), States.CONNECTED, "Fail to connect to zk using current session info"); dupZookeeper.close(); // make sure session expiry really happens - waitExpire.await(); + waitNewSession.await(); zkClient.unsubscribeStateChanges(listener); connection = (ZkConnection) zkClient.getConnection(); curZookeeper = connection.getZookeeper(); - // System.err.println("zk: " + oldZookeeper); - LOG.info("After expiry. sessionId: " + Long.toHexString(curZookeeper.getSessionId())); + String newSessionId = Long.toHexString(curZookeeper.getSessionId()); + LOG.info("After session expiry. sessionId: " + newSessionId + ", zk: " + curZookeeper); + Assert.assertNotSame(newSessionId, oldSessionId, "Fail to expire current session, zk: " + curZookeeper); } /** diff --git a/helix-core/src/test/java/org/apache/helix/manager/zk/TestZkFlapping.java b/helix-core/src/test/java/org/apache/helix/manager/zk/TestZkFlapping.java new file mode 100644 index 0000000000..5b35148d15 --- /dev/null +++ b/helix-core/src/test/java/org/apache/helix/manager/zk/TestZkFlapping.java @@ -0,0 +1,272 @@ +package org.apache.helix.manager.zk; + +import java.util.Date; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; + +import org.I0Itec.zkclient.IZkDataListener; +import org.I0Itec.zkclient.IZkStateListener; +import org.apache.helix.PropertyKey; +import org.apache.helix.ZNRecord; +import org.apache.helix.HelixDataAccessor; +import org.apache.helix.TestHelper; +import org.apache.helix.TestHelper.Verifier; +import org.apache.helix.ZkTestHelper; +import 
org.apache.helix.ZkUnitTestBase; +import org.apache.helix.mock.controller.ClusterController; +import org.apache.helix.mock.participant.MockParticipant; +import org.apache.helix.model.LiveInstance; +import org.apache.zookeeper.Watcher.Event.KeeperState; +import org.testng.Assert; +import org.testng.annotations.Test; + +public class TestZkFlapping extends ZkUnitTestBase { + + @Test + public void testZkSessionExpiry() throws Exception { + String className = TestHelper.getTestClassName(); + String methodName = TestHelper.getTestMethodName(); + String clusterName = className + "_" + methodName; + System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis())); + + ZkClient client = + new ZkClient(ZK_ADDR, ZkClient.DEFAULT_SESSION_TIMEOUT, + ZkClient.DEFAULT_CONNECTION_TIMEOUT, new ZNRecordSerializer()); + + String path = String.format("/%s", clusterName); + client.createEphemeral(path); + String oldSessionId = ZkTestHelper.getSessionId(client); + ZkTestHelper.expireSession(client); + String newSessionId = ZkTestHelper.getSessionId(client); + Assert.assertNotSame(newSessionId, oldSessionId); + Assert.assertFalse(client.exists(path), "Ephemeral znode should be gone after session expiry"); + client.close(); + System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis())); + } + + @Test + public void testCloseZkClient() { + String className = TestHelper.getTestClassName(); + String methodName = TestHelper.getTestMethodName(); + String clusterName = className + "_" + methodName; + System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis())); + + ZkClient client = + new ZkClient(ZK_ADDR, ZkClient.DEFAULT_SESSION_TIMEOUT, + ZkClient.DEFAULT_CONNECTION_TIMEOUT, new ZNRecordSerializer()); + String path = String.format("/%s", clusterName); + client.createEphemeral(path); + + client.close(); + Assert.assertFalse(_gZkClient.exists(path), "Ephemeral node: " + path + + " should be removed after 
ZkClient#close()"); + System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis())); + } + + @Test + public void testCloseZkClientInZkClientEventThread() throws Exception { + String className = TestHelper.getTestClassName(); + String methodName = TestHelper.getTestMethodName(); + String clusterName = className + "_" + methodName; + System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis())); + + final CountDownLatch waitCallback = new CountDownLatch(1); + final ZkClient client = + new ZkClient(ZK_ADDR, ZkClient.DEFAULT_SESSION_TIMEOUT, + ZkClient.DEFAULT_CONNECTION_TIMEOUT, new ZNRecordSerializer()); + String path = String.format("/%s", clusterName); + client.createEphemeral(path); + client.subscribeDataChanges(path, new IZkDataListener() { + + @Override + public void handleDataDeleted(String dataPath) throws Exception { + } + + @Override + public void handleDataChange(String dataPath, Object data) throws Exception { + client.close(); + waitCallback.countDown(); + } + }); + + client.writeData(path, new ZNRecord("test")); + waitCallback.await(); + Assert.assertFalse(_gZkClient.exists(path), "Ephemeral node: " + path + + " should be removed after ZkClient#close() in its own event-thread"); + + System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis())); + + } + + class ZkStateCountListener implements IZkStateListener { + int count = 0; + + @Override + public void handleStateChanged(KeeperState state) throws Exception { + if (state == KeeperState.Disconnected) { + count++; + } + } + + @Override + public void handleNewSession() throws Exception { + } + } + + @Test + public void testParticipantFlapping() throws Exception { + String className = TestHelper.getTestClassName(); + String methodName = TestHelper.getTestMethodName(); + String clusterName = className + "_" + methodName; + final HelixDataAccessor accessor = + new ZKHelixDataAccessor(clusterName, new 
ZkBaseDataAccessor(_gZkClient)); + final PropertyKey.Builder keyBuilder = accessor.keyBuilder(); + + System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis())); + + TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port + "localhost", // participant name prefix + "TestDB", // resource name prefix + 1, // resources + 32, // partitions per resource + 1, // number of nodes + 1, // replicas + "MasterSlave", false); + + final String instanceName = "localhost_12918"; + MockParticipant participant = new MockParticipant(clusterName, instanceName, ZK_ADDR, null); + participant.syncStart(); + + final ZkClient client = participant.getManager().getZkClient(); + final ZkStateCountListener listener = new ZkStateCountListener(); + client.subscribeStateChanges(listener); + + final AtomicInteger expectDisconnectCnt = new AtomicInteger(0); + final int n = ZKHelixManager.MAX_DISCONNECT_THRESHOLD; + for (int i = 0; i < n; i++) { + String oldSessionId = ZkTestHelper.getSessionId(client); + ZkTestHelper.simulateZkStateDisconnected(client); + expectDisconnectCnt.incrementAndGet(); + // wait until we get invoked by zk state change to disconnected + TestHelper.verify(new Verifier() { + + @Override + public boolean verify() throws Exception { + return listener.count == expectDisconnectCnt.get(); + } + }, 30 * 1000); + + String newSessionId = ZkTestHelper.getSessionId(client); + Assert.assertEquals(newSessionId, oldSessionId); + } + client.unsubscribeStateChanges(listener); + // make sure participant is NOT disconnected + LiveInstance liveInstance = accessor.getProperty(keyBuilder.liveInstance(instanceName)); + Assert.assertNotNull(liveInstance, "Live-instance should exist after " + n + " disconnects"); + + // trigger flapping + ZkTestHelper.simulateZkStateDisconnected(client); + // wait until we get invoked by zk state change to disconnected + boolean success = TestHelper.verify(new Verifier() { + + @Override + public boolean verify() throws 
Exception { + return client.getShutdownTrigger(); + } + }, 30 * 1000); + + Assert.assertTrue(success, "The " + (n + 1) + + "th disconnect event should trigger ZkHelixManager#disonnect"); + + // make sure participant is disconnected + success = TestHelper.verify(new TestHelper.Verifier() { + + @Override + public boolean verify() throws Exception { + LiveInstance liveInstance = accessor.getProperty(keyBuilder.liveInstance(instanceName)); + return liveInstance == null; + } + }, 3 * 1000); + Assert.assertTrue(success, "Live-instance should be gone after " + (n + 1) + " disconnects"); + + System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis())); + } + + @Test + public void testControllerFlapping() throws Exception { + String className = TestHelper.getTestClassName(); + String methodName = TestHelper.getTestMethodName(); + String clusterName = className + "_" + methodName; + final HelixDataAccessor accessor = + new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor(_gZkClient)); + final PropertyKey.Builder keyBuilder = accessor.keyBuilder(); + + System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis())); + + TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port + "localhost", // participant name prefix + "TestDB", // resource name prefix + 1, // resources + 32, // partitions per resource + 1, // number of nodes + 1, // replicas + "MasterSlave", false); + + ClusterController controller = new ClusterController(clusterName, "controller", ZK_ADDR); + controller.syncStart(); + + final ZkClient client = controller.getManager().getZkClient(); + final ZkStateCountListener listener = new ZkStateCountListener(); + client.subscribeStateChanges(listener); + + final AtomicInteger expectDisconnectCnt = new AtomicInteger(0); + final int n = ZKHelixManager.MAX_DISCONNECT_THRESHOLD; + for (int i = 0; i < n; i++) { + String oldSessionId = ZkTestHelper.getSessionId(client); + 
ZkTestHelper.simulateZkStateDisconnected(client); + expectDisconnectCnt.incrementAndGet(); + // wait until we get invoked by zk state change to disconnected + TestHelper.verify(new Verifier() { + + @Override + public boolean verify() throws Exception { + return listener.count == expectDisconnectCnt.get(); + } + }, 30 * 1000); + + String newSessionId = ZkTestHelper.getSessionId(client); + Assert.assertEquals(newSessionId, oldSessionId); + } + + // make sure controller is NOT disconnected + LiveInstance leader = accessor.getProperty(keyBuilder.controllerLeader()); + Assert.assertNotNull(leader, "Leader should exist after " + n + " disconnects"); + + // trigger flapping + ZkTestHelper.simulateZkStateDisconnected(client); + // wait until we get invoked by zk state change to disconnected + boolean success = TestHelper.verify(new Verifier() { + + @Override + public boolean verify() throws Exception { + return client.getShutdownTrigger(); + } + }, 30 * 1000); + + Assert.assertTrue(success, "The " + (n + 1) + + "th disconnect event should trigger ZkHelixManager#disonnect"); + + // make sure controller is disconnected + success = TestHelper.verify(new TestHelper.Verifier() { + + @Override + public boolean verify() throws Exception { + LiveInstance leader = accessor.getProperty(keyBuilder.controllerLeader()); + return leader == null; + } + }, 5 * 1000); + Assert.assertTrue(success, "Leader should be gone after " + (n + 1) + " disconnects"); + + System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis())); + } +} From b1c80f44b9b5e6487c8fcdf6ee9ab0ecd46b4167 Mon Sep 17 00:00:00 2001 From: Kanak Biscuitwala Date: Sun, 6 Oct 2013 15:36:36 -0700 Subject: [PATCH 018/113] [HELIX-266] Create an IRC channel for Helix --- src/site/markdown/IRC.md | 29 +++++++++++++++++++++++++++++ src/site/markdown/index.md | 12 +++++++----- src/site/site.xml | 5 +++-- 3 files changed, 39 insertions(+), 7 deletions(-) create mode 100644 src/site/markdown/IRC.md diff --git 
a/src/site/markdown/IRC.md b/src/site/markdown/IRC.md new file mode 100644 index 0000000000..10dc4cc740 --- /dev/null +++ b/src/site/markdown/IRC.md @@ -0,0 +1,29 @@ + + +IRC +--- + +We're often available on the ```#apachehelix``` channel on the ```chat.freenode.net``` server. You can use your favorite IRC client or the web client below to connect. + +### Web IRC Client + +To connect to our channel, just enter a username below. + +