YARN-11262. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-yarn-server-resourcemanager Part4. #7488

Draft: wants to merge 1 commit into base branch trunk.
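The migration shown in this diff follows one pattern: org.junit.Assert becomes org.junit.jupiter.api.Assertions, the @Before/@After lifecycle annotations become @BeforeEach/@AfterEach, and the assertion message moves from the first argument to the last. A minimal sketch of the target shape, using a hypothetical ExampleMigrationTest rather than code from this patch:

// Minimal sketch (not from this patch) of the JUnit 5 (Jupiter) shape the migration targets.
// JUnit 4 equivalents noted in comments: Assert -> Assertions, @Before -> @BeforeEach,
// @After -> @AfterEach, and the assertion message moves from first to last argument.
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

public class ExampleMigrationTest {

  private int nodeCount;

  @BeforeEach            // was @Before (org.junit.Before)
  public void setup() {
    nodeCount = 8;
  }

  @Test
  public void testNodeCount() {
    // JUnit 4: Assert.assertEquals("Incorrect number of nodes", 8, nodeCount);
    // JUnit 5: the message string is the last parameter.
    Assertions.assertEquals(8, nodeCount, "Incorrect number of nodes");
  }

  @AfterEach             // was @After (org.junit.After)
  public void teardown() {
    nodeCount = 0;
  }
}

The same rule applies to statically imported assertions (as in the ClusterNodeTracker changes below), where only the argument order changes.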
@@ -36,8 +36,8 @@
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSLeafQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.LocalityAppPlacementAllocator;
import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
import org.junit.Assert;
import org.junit.Test;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;

public class TestAppSchedulingInfo {

@@ -57,32 +57,32 @@ public void testBacklistChanged() {

appSchedulingInfo.updatePlacesBlacklistedByApp(new ArrayList<String>(),
new ArrayList<String>());
Assert.assertFalse(appSchedulingInfo.getAndResetBlacklistChanged());
Assertions.assertFalse(appSchedulingInfo.getAndResetBlacklistChanged());

ArrayList<String> blacklistAdditions = new ArrayList<String>();
blacklistAdditions.add("node1");
blacklistAdditions.add("node2");
appSchedulingInfo.updatePlacesBlacklistedByApp(blacklistAdditions,
new ArrayList<String>());
Assert.assertTrue(appSchedulingInfo.getAndResetBlacklistChanged());
Assertions.assertTrue(appSchedulingInfo.getAndResetBlacklistChanged());

blacklistAdditions.clear();
blacklistAdditions.add("node1");
appSchedulingInfo.updatePlacesBlacklistedByApp(blacklistAdditions,
new ArrayList<String>());
Assert.assertFalse(appSchedulingInfo.getAndResetBlacklistChanged());
Assertions.assertFalse(appSchedulingInfo.getAndResetBlacklistChanged());

ArrayList<String> blacklistRemovals = new ArrayList<String>();
blacklistRemovals.add("node1");
appSchedulingInfo.updatePlacesBlacklistedByApp(new ArrayList<String>(),
blacklistRemovals);
appSchedulingInfo.updatePlacesBlacklistedByApp(new ArrayList<String>(),
blacklistRemovals);
Assert.assertTrue(appSchedulingInfo.getAndResetBlacklistChanged());
Assertions.assertTrue(appSchedulingInfo.getAndResetBlacklistChanged());

appSchedulingInfo.updatePlacesBlacklistedByApp(new ArrayList<String>(),
blacklistRemovals);
Assert.assertFalse(appSchedulingInfo.getAndResetBlacklistChanged());
Assertions.assertFalse(appSchedulingInfo.getAndResetBlacklistChanged());
}

@Test
@@ -96,23 +96,23 @@ public void testSchedulerRequestKeyOrdering() {
ts.add(TestUtils.toSchedulerKey(Priority.newInstance(2), 6));
Iterator<SchedulerRequestKey> iter = ts.iterator();
SchedulerRequestKey sk = iter.next();
Assert.assertEquals(0, sk.getPriority().getPriority());
Assert.assertEquals(3, sk.getAllocationRequestId());
Assertions.assertEquals(0, sk.getPriority().getPriority());
Assertions.assertEquals(3, sk.getAllocationRequestId());
sk = iter.next();
Assert.assertEquals(0, sk.getPriority().getPriority());
Assert.assertEquals(4, sk.getAllocationRequestId());
Assertions.assertEquals(0, sk.getPriority().getPriority());
Assertions.assertEquals(4, sk.getAllocationRequestId());
sk = iter.next();
Assert.assertEquals(1, sk.getPriority().getPriority());
Assert.assertEquals(1, sk.getAllocationRequestId());
Assertions.assertEquals(1, sk.getPriority().getPriority());
Assertions.assertEquals(1, sk.getAllocationRequestId());
sk = iter.next();
Assert.assertEquals(1, sk.getPriority().getPriority());
Assert.assertEquals(2, sk.getAllocationRequestId());
Assertions.assertEquals(1, sk.getPriority().getPriority());
Assertions.assertEquals(2, sk.getAllocationRequestId());
sk = iter.next();
Assert.assertEquals(2, sk.getPriority().getPriority());
Assert.assertEquals(5, sk.getAllocationRequestId());
Assertions.assertEquals(2, sk.getPriority().getPriority());
Assertions.assertEquals(5, sk.getAllocationRequestId());
sk = iter.next();
Assert.assertEquals(2, sk.getPriority().getPriority());
Assert.assertEquals(6, sk.getAllocationRequestId());
Assertions.assertEquals(2, sk.getPriority().getPriority());
Assertions.assertEquals(6, sk.getAllocationRequestId());
}

@Test
@@ -128,7 +128,7 @@ public void testSchedulerKeyAccounting() {
AppSchedulingInfo info = new AppSchedulingInfo(
appAttemptId, "test", queue, mock(ActiveUsersManager.class), 0,
new ResourceUsage(), new HashMap<>(), rmContext, false);
Assert.assertEquals(0, info.getSchedulerKeys().size());
Assertions.assertEquals(0, info.getSchedulerKeys().size());

Priority pri1 = Priority.newInstance(1);
ResourceRequest req1 = ResourceRequest.newInstance(pri1,
@@ -142,16 +142,16 @@ public void testSchedulerKeyAccounting() {
info.updateResourceRequests(reqs, false);
ArrayList<SchedulerRequestKey> keys =
new ArrayList<>(info.getSchedulerKeys());
Assert.assertEquals(2, keys.size());
Assert.assertEquals(SchedulerRequestKey.create(req1), keys.get(0));
Assert.assertEquals(SchedulerRequestKey.create(req2), keys.get(1));
Assertions.assertEquals(2, keys.size());
Assertions.assertEquals(SchedulerRequestKey.create(req1), keys.get(0));
Assertions.assertEquals(SchedulerRequestKey.create(req2), keys.get(1));

// iterate to verify no ConcurrentModificationException
for (SchedulerRequestKey schedulerKey : info.getSchedulerKeys()) {
info.allocate(NodeType.OFF_SWITCH, null, schedulerKey, null);
}
Assert.assertEquals(1, info.getSchedulerKeys().size());
Assert.assertEquals(SchedulerRequestKey.create(req2),
Assertions.assertEquals(1, info.getSchedulerKeys().size());
Assertions.assertEquals(SchedulerRequestKey.create(req2),
info.getSchedulerKeys().iterator().next());

req2 = ResourceRequest.newInstance(pri2,
@@ -161,22 +161,22 @@ public void testSchedulerKeyAccounting() {
info.updateResourceRequests(reqs, false);
info.allocate(NodeType.OFF_SWITCH, null, SchedulerRequestKey.create(req2),
null);
Assert.assertEquals(0, info.getSchedulerKeys().size());
Assertions.assertEquals(0, info.getSchedulerKeys().size());

req1 = ResourceRequest.newInstance(pri1,
ResourceRequest.ANY, Resource.newInstance(1024, 1), 5);
reqs.clear();
reqs.add(req1);
info.updateResourceRequests(reqs, false);
Assert.assertEquals(1, info.getSchedulerKeys().size());
Assert.assertEquals(SchedulerRequestKey.create(req1),
Assertions.assertEquals(1, info.getSchedulerKeys().size());
Assertions.assertEquals(SchedulerRequestKey.create(req1),
info.getSchedulerKeys().iterator().next());
req1 = ResourceRequest.newInstance(pri1,
ResourceRequest.ANY, Resource.newInstance(1024, 1), 0);
reqs.clear();
reqs.add(req1);
info.updateResourceRequests(reqs, false);
Assert.assertEquals(0, info.getSchedulerKeys().size());
Assertions.assertEquals(0, info.getSchedulerKeys().size());
}

@Test
@@ -193,17 +193,17 @@ public void testApplicationPlacementType() {
AppSchedulingInfo info = new AppSchedulingInfo(appAttemptId, "test", queue,
mock(ActiveUsersManager.class), 0, new ResourceUsage(), new HashMap<>(),
rmContext, false);
Assert.assertEquals(info.getApplicationSchedulingEnvs(), new HashMap<>());
Assertions.assertEquals(info.getApplicationSchedulingEnvs(), new HashMap<>());
// This should return null as nothing is set in the conf.
Assert.assertNull(info.getDefaultResourceRequestAppPlacementType());
Assertions.assertNull(info.getDefaultResourceRequestAppPlacementType());
conf = new Configuration();
conf.set(YarnConfiguration.APPLICATION_PLACEMENT_TYPE_CLASS,
DEFAULT_APPLICATION_PLACEMENT_TYPE_CLASS);
when(rmContext.getYarnConfiguration()).thenReturn(conf);
info = new AppSchedulingInfo(appAttemptId, "test", queue,
mock(ActiveUsersManager.class), 0, new ResourceUsage(), new HashMap<>(),
rmContext, false);
Assert.assertEquals(info.getDefaultResourceRequestAppPlacementType(),
Assertions.assertEquals(info.getDefaultResourceRequestAppPlacementType(),
DEFAULT_APPLICATION_PLACEMENT_TYPE_CLASS);
}

@@ -223,7 +223,7 @@ public void testApplicationPlacementTypeNotConfigured() {
mock(ActiveUsersManager.class), 0, new ResourceUsage(),
applicationSchedulingEnvs, rmContext, false);
// This should be set from applicationSchedulingEnvs
Assert.assertEquals(info.getDefaultResourceRequestAppPlacementType(),
Assertions.assertEquals(info.getDefaultResourceRequestAppPlacementType(),
LocalityAppPlacementAllocator.class.getName());
}
}
@@ -30,11 +30,11 @@
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSSchedulerNode;
import org.apache.hadoop.yarn.util.resource.ResourceUtils;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import static org.junit.Assert.assertEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;

/**
* Test class to verify ClusterNodeTracker. Using FSSchedulerNode without
@@ -44,13 +44,13 @@ public class TestClusterNodeTracker {
private ClusterNodeTracker<FSSchedulerNode> nodeTracker;
private ClusterMetrics metrics;

@Before
@BeforeEach
public void setup() {
metrics = ClusterMetrics.getMetrics();
nodeTracker = new ClusterNodeTracker<>();
}

@After
@AfterEach
public void teardown() {
ClusterMetrics.destroy();
}
@@ -67,33 +67,33 @@ private void addEight4x4Nodes() {
@Test
public void testGetNodeCount() {
addEight4x4Nodes();
assertEquals("Incorrect number of nodes in the cluster",
8, nodeTracker.nodeCount());
assertEquals(
8, nodeTracker.nodeCount(), "Incorrect number of nodes in the cluster");

assertEquals("Incorrect number of nodes in each rack",
4, nodeTracker.nodeCount("rack0"));
assertEquals(
4, nodeTracker.nodeCount("rack0"), "Incorrect number of nodes in each rack");
}

@Test
public void testIncrCapability() {
addEight4x4Nodes();
assertEquals("Cluster Capability Memory incorrect",
metrics.getCapabilityMB(), (4096 * 8));
assertEquals("Cluster Capability Vcores incorrect",
metrics.getCapabilityVirtualCores(), 4 * 8);
assertEquals(
metrics.getCapabilityMB(), (4096 * 8), "Cluster Capability Memory incorrect");
assertEquals(
metrics.getCapabilityVirtualCores(), 4 * 8, "Cluster Capability Vcores incorrect");
}

@Test
public void testGetNodesForResourceName() throws Exception {
addEight4x4Nodes();
assertEquals("Incorrect number of nodes matching ANY",
8, nodeTracker.getNodesByResourceName(ResourceRequest.ANY).size());
assertEquals(
8, nodeTracker.getNodesByResourceName(ResourceRequest.ANY).size(), "Incorrect number of nodes matching ANY");

assertEquals("Incorrect number of nodes matching rack",
4, nodeTracker.getNodesByResourceName("rack0").size());
assertEquals(
4, nodeTracker.getNodesByResourceName("rack0").size(), "Incorrect number of nodes matching rack");

assertEquals("Incorrect number of nodes matching node",
1, nodeTracker.getNodesByResourceName("host0").size());
assertEquals(
1, nodeTracker.getNodesByResourceName("host0").size(), "Incorrect number of nodes matching node");
}

@Test
@@ -113,8 +113,8 @@ public void testMaxAllowedAllocation() {

Resource result = nodeTracker.getMaxAllowedAllocation();

assertEquals("With no nodes added, the ClusterNodeTracker did not return "
+ "the configured max allocation", maximum, result);
assertEquals(maximum, result, "With no nodes added, the ClusterNodeTracker did not return "
+ "the configured max allocation");

List<RMNode> smallNodes =
MockNodes.newNodes(1, 1, Resource.newInstance(1024, 2,
@@ -133,72 +133,72 @@

result = nodeTracker.getMaxAllowedAllocation();

assertEquals("With a single node added, the ClusterNodeTracker did not "
+ "return that node's resources as the maximum allocation",
mediumNodes.get(0).getTotalCapability(), result);
assertEquals(
mediumNodes.get(0).getTotalCapability(), result, "With a single node added, the ClusterNodeTracker did not "
+ "return that node's resources as the maximum allocation");

nodeTracker.addNode(smallNode);

result = nodeTracker.getMaxAllowedAllocation();

assertEquals("With two nodes added, the ClusterNodeTracker did not "
assertEquals(
Resource.newInstance(4096, 2, Collections.singletonMap("test1", 4L))
, result, "With two nodes added, the ClusterNodeTracker did not "
+ "return a the maximum allocation that was the max of their aggregate "
+ "resources",
Resource.newInstance(4096, 2, Collections.singletonMap("test1", 4L)),
result);
+ "resources");

nodeTracker.removeNode(smallNode.getNodeID());

result = nodeTracker.getMaxAllowedAllocation();

assertEquals("After removing a node, the ClusterNodeTracker did not "
+ "recalculate the adjusted maximum allocation correctly",
mediumNodes.get(0).getTotalCapability(), result);
assertEquals(
mediumNodes.get(0).getTotalCapability(), result, "After removing a node, the ClusterNodeTracker did not "
+ "recalculate the adjusted maximum allocation correctly");

nodeTracker.addNode(largeNode);

result = nodeTracker.getMaxAllowedAllocation();

assertEquals("With two nodes added, the ClusterNodeTracker did not "
assertEquals(
Resource.newInstance(10240, 4, Collections.singletonMap("test1", 2L))
, result, "With two nodes added, the ClusterNodeTracker did not "
+ "return a the maximum allocation that was the max of their aggregate "
+ "resources",
Resource.newInstance(10240, 4, Collections.singletonMap("test1", 2L)),
result);
+ "resources");

nodeTracker.removeNode(largeNode.getNodeID());

result = nodeTracker.getMaxAllowedAllocation();

assertEquals("After removing a node, the ClusterNodeTracker did not "
+ "recalculate the adjusted maximum allocation correctly",
mediumNodes.get(0).getTotalCapability(), result);
assertEquals(
mediumNodes.get(0).getTotalCapability(), result, "After removing a node, the ClusterNodeTracker did not "
+ "recalculate the adjusted maximum allocation correctly");

nodeTracker.removeNode(mediumNode.getNodeID());

result = nodeTracker.getMaxAllowedAllocation();

assertEquals("After removing all nodes, the ClusterNodeTracker did not "
+ "return the configured maximum allocation", maximum, result);
assertEquals(maximum, result, "After removing all nodes, the ClusterNodeTracker did not "
+ "return the configured maximum allocation");

nodeTracker.addNode(smallNode);
nodeTracker.addNode(mediumNode);
nodeTracker.addNode(largeNode);

result = nodeTracker.getMaxAllowedAllocation();

assertEquals("With three nodes added, the ClusterNodeTracker did not "
assertEquals(
Resource.newInstance(10240, 4, Collections.singletonMap("test1", 4L))
, result, "With three nodes added, the ClusterNodeTracker did not "
+ "return a the maximum allocation that was the max of their aggregate "
+ "resources",
Resource.newInstance(10240, 4, Collections.singletonMap("test1", 4L)),
result);
+ "resources");

nodeTracker.removeNode(smallNode.getNodeID());
nodeTracker.removeNode(mediumNode.getNodeID());
nodeTracker.removeNode(largeNode.getNodeID());

result = nodeTracker.getMaxAllowedAllocation();

assertEquals("After removing all nodes, the ClusterNodeTracker did not "
+ "return the configured maximum allocation", maximum, result);
assertEquals(maximum, result, "After removing all nodes, the ClusterNodeTracker did not "
+ "return the configured maximum allocation");
}
}
@@ -27,15 +27,15 @@
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.QueueAdminConfigurationMutationACLPolicy;
import org.apache.hadoop.yarn.webapp.dao.QueueConfigInfo;
import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
import org.junit.Before;
import org.junit.Test;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import java.io.IOException;
import java.util.Collections;
import java.util.Map;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
@@ -54,7 +54,7 @@ public class TestConfigurationMutationACLPolicies {
private static final Map<String, String> EMPTY_MAP =
Collections.<String, String>emptyMap();

@Before
@BeforeEach
public void setUp() throws IOException {
rmContext = mock(RMContext.class);
scheduler = mock(MutableConfScheduler.class);