HADOOP-19435. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-fs2img. #7579

Merged 3 commits on Apr 8, 2025.
@@ -90,11 +90,12 @@
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NodeBase;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
+import org.apache.hadoop.test.TestName;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.api.extension.RegisterExtension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -107,14 +108,20 @@
import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
import static org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap.fileNameFromBlockPoolID;
import static org.apache.hadoop.net.NodeBase.PATH_SEPARATOR_STR;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;

/**
* Integration tests for the Provided implementation.
*/
public class ITestProvidedImplementation {

-@Rule public TestName name = new TestName();
+@RegisterExtension
+private TestName name = new TestName();

public static final Logger LOG =
LoggerFactory.getLogger(ITestProvidedImplementation.class);
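
JUnit 5 ships no bundled replacement for the JUnit 4 TestName rule, which is why the import switches to Hadoop's own org.apache.hadoop.test.TestName, registered through @RegisterExtension. As a rough sketch of what such an extension involves (class and field names here are illustrative, not Hadoop's actual implementation), a BeforeEachCallback can capture the running test method's name:

import org.junit.jupiter.api.extension.BeforeEachCallback;
import org.junit.jupiter.api.extension.ExtensionContext;

// Hypothetical minimal stand-in for the JUnit 4 TestName rule:
// record the current test method's name before each test runs.
public class TestNameSketch implements BeforeEachCallback {
  private volatile String methodName;

  @Override
  public void beforeEach(ExtensionContext context) {
    methodName = context.getTestMethod()
        .map(java.lang.reflect.Method::getName)
        .orElse(null);
  }

  public String getMethodName() {
    return methodName;
  }
}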

@@ -136,7 +143,7 @@ public class ITestProvidedImplementation {
private Configuration conf;
private MiniDFSCluster cluster;

-@Before
+@BeforeEach
public void setSeed() throws Exception {
if (fBASE.exists() && !FileUtil.fullyDelete(fBASE)) {
throw new IOException("Could not fully delete " + fBASE);
@@ -196,7 +203,7 @@ public void setSeed() throws Exception {
}
}

-@After
+@AfterEach
public void shutdown() throws Exception {
try {
if (cluster != null) {
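
The lifecycle annotations map one-to-one: @Before becomes @BeforeEach and @After becomes @AfterEach (JUnit 4's @BeforeClass/@AfterClass, not used in this file, would become @BeforeAll/@AfterAll). A minimal illustration, with hypothetical names:

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

class LifecycleSketch {
  @BeforeEach
  void setUp() {
    // Runs before every @Test, like JUnit 4's @Before.
  }

  @Test
  void example() {
  }

  @AfterEach
  void tearDown() {
    // Runs after every @Test, like JUnit 4's @After.
  }
}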
@@ -312,7 +319,8 @@ private static List<File> getProvidedNamenodeDirs(String baseDir,
return nnDirs;
}

-@Test(timeout=20000)
+@Test
+@Timeout(value = 20)
public void testLoadImage() throws Exception {
final long seed = r.nextLong();
LOG.info("providedPath: " + providedPath);
@@ -338,7 +346,8 @@ public void testLoadImage() throws Exception {
}
}

-@Test(timeout=30000)
+@Test
+@Timeout(value = 30)
public void testProvidedReporting() throws Exception {
conf.setClass(ImageWriter.Options.UGI_CLASS,
SingleUGIResolver.class, UGIResolver.class);
@@ -417,7 +426,8 @@ public void testProvidedReporting() throws Exception {
}
}

-@Test(timeout=500000)
+@Test
+@Timeout(value = 500)
public void testDefaultReplication() throws Exception {
int targetReplication = 2;
conf.setInt(FixedBlockMultiReplicaResolver.REPLICATION, targetReplication);
@@ -529,7 +539,8 @@ private BlockLocation[] createFile(Path path, short replication,
return fs.getFileBlockLocations(path, 0, fileLen);
}

-@Test(timeout=30000)
+@Test
+@Timeout(value = 30)
public void testClusterWithEmptyImage() throws IOException {
// start a cluster with 2 datanodes without any provided storage
startCluster(nnDirPath, 2, null,
@@ -567,8 +578,8 @@ private DatanodeInfo[] getAndCheckBlockLocations(DFSClient client,
private void checkUniqueness(DatanodeInfo[] locations) {
Set<String> set = new HashSet<>();
for (DatanodeInfo info: locations) {
assertFalse("All locations should be unique",
set.contains(info.getDatanodeUuid()));
assertFalse(set.contains(info.getDatanodeUuid()),
"All locations should be unique");
set.add(info.getDatanodeUuid());
}
}
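
This hunk shows the other recurring mechanical change: JUnit 4's Assert methods take the failure message as the first argument, while Jupiter's Assertions take it last, optionally as a Supplier<String> that is only evaluated on failure. A self-contained sketch of both forms (names are illustrative):

import java.util.Set;

import static org.junit.jupiter.api.Assertions.assertFalse;

class AssertionOrderSketch {
  void checkUnique(String uuid, Set<String> seen) {
    // JUnit 4: assertFalse("message", condition)
    // JUnit 5: assertFalse(condition, "message"), or lazily:
    assertFalse(seen.contains(uuid),
        () -> "All locations should be unique, duplicate: " + uuid);
    seen.add(uuid);
  }
}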
@@ -577,7 +588,8 @@ private void checkUniqueness(DatanodeInfo[] locations) {
* Tests setting replication of provided files.
* @throws Exception
*/
-@Test(timeout=50000)
+@Test
+@Timeout(value = 50)
public void testSetReplicationForProvidedFiles() throws Exception {
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
FixedBlockResolver.class);
@@ -618,7 +630,8 @@ private void setAndUnsetReplication(String filename) throws Exception {
defaultReplication);
}

-@Test(timeout=30000)
+@Test
+@Timeout(value = 30)
public void testProvidedDatanodeFailures() throws Exception {
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
FixedBlockResolver.class);
@@ -689,7 +702,8 @@ public void testProvidedDatanodeFailures() throws Exception {
}
}

-@Test(timeout=300000)
+@Test
+@Timeout(value = 300)
public void testTransientDeadDatanodes() throws Exception {
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
FixedBlockResolver.class);
@@ -727,7 +741,8 @@ private DatanodeStorageInfo getProvidedDatanodeStorageInfo() {
return providedStorageMap.getProvidedStorageInfo();
}

-@Test(timeout=30000)
+@Test
+@Timeout(value = 30)
public void testNamenodeRestart() throws Exception {
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
FixedBlockResolver.class);
@@ -768,7 +783,8 @@ private void verifyFileLocation(int fileIndex, int replication)
}
}

-@Test(timeout=30000)
+@Test
+@Timeout(value = 30)
public void testSetClusterID() throws Exception {
String clusterID = "PROVIDED-CLUSTER";
createImage(new FSTreeWalk(providedPath, conf), nnDirPath,
@@ -783,7 +799,8 @@ public void testSetClusterID() throws Exception {
assertEquals(clusterID, nn.getNamesystem().getClusterId());
}

-@Test(timeout=30000)
+@Test
+@Timeout(value = 30)
public void testNumberOfProvidedLocations() throws Exception {
// set default replication to 4
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 4);
@@ -814,7 +831,8 @@ public void testNumberOfProvidedLocations() throws Exception {
}
}

-@Test(timeout=30000)
+@Test
+@Timeout(value = 30)
public void testNumberOfProvidedLocationsManyBlocks() throws Exception {
// increase number of blocks per file to at least 10 blocks per file
conf.setLong(FixedBlockResolver.BLOCKSIZE, baseFileLen/10);
@@ -23,14 +23,12 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;

import java.util.HashMap;
import java.util.Map;

-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -84,37 +82,37 @@ public void testImportAcl() throws Exception {
* Verify ACL enabled TreeWalk iterator throws an error if the external file
* system does not support ACLs.
*/
-@Test(expected = UnsupportedOperationException.class)
+@Test
public void testACLNotSupported() throws Exception {
-Configuration conf = new Configuration();
-conf.setBoolean(DFSConfigKeys.DFS_PROVIDED_ACLS_IMPORT_ENABLED, true);
-
-FileSystem fs = mock(FileSystem.class);
-when(fs.getAclStatus(any())).thenThrow(new UnsupportedOperationException());
-Path root = mock(Path.class);
-when(root.getFileSystem(conf)).thenReturn(fs);
-FileStatus rootFileStatus = new FileStatus(0, true, 0, 0, 1, root);
-when(fs.getFileStatus(root)).thenReturn(rootFileStatus);
-
-FSTreeWalk fsTreeWalk = new FSTreeWalk(root, conf);
-TreeWalk.TreeIterator iter = fsTreeWalk.iterator();
-fail("Unexpected successful creation of iter: " + iter);
+assertThrows(UnsupportedOperationException.class, () -> {
+Configuration conf = new Configuration();
+conf.setBoolean(DFSConfigKeys.DFS_PROVIDED_ACLS_IMPORT_ENABLED, true);
+FileSystem fs = mock(FileSystem.class);
+when(fs.getAclStatus(any())).thenThrow(new UnsupportedOperationException());
+Path root = mock(Path.class);
+when(root.getFileSystem(conf)).thenReturn(fs);
+FileStatus rootFileStatus = new FileStatus(0, true, 0, 0, 1, root);
+when(fs.getFileStatus(root)).thenReturn(rootFileStatus);
+FSTreeWalk fsTreeWalk = new FSTreeWalk(root, conf);
+TreeWalk.TreeIterator iter = fsTreeWalk.iterator();
+fail("Unexpected successful creation of iter: " + iter);
+});
}
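
assertThrows also returns the caught exception for further inspection, and the lambda ideally wraps only the code expected to throw; setup placed outside (as most of the block above could be) fails the test with an error instead of passing spuriously. A sketch of that tighter form, under illustrative names:

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;

import org.junit.jupiter.api.Test;

class AssertThrowsSketch {
  @Test
  void reportsUnsupportedOperation() {
    // Setup outside the lambda: an unexpected failure here surfaces
    // as a test error rather than a false pass.
    Runnable unsupported = () -> {
      throw new UnsupportedOperationException("ACLs not supported");
    };
    UnsupportedOperationException ex = assertThrows(
        UnsupportedOperationException.class, unsupported::run);
    // The returned exception can be asserted on directly.
    assertEquals("ACLs not supported", ex.getMessage());
  }
}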

/**
* Verify creation of INode for ACL enabled TreePath throws an error.
*/
-@Test(expected = UnsupportedOperationException.class)
+@Test
public void testToINodeACLNotSupported() throws Exception {
-BlockResolver blockResolver = new FixedBlockResolver();
-Path root = new Path("/");
-FileStatus rootFileStatus = new FileStatus(0, false, 0, 0, 1, root);
-
-AclStatus acls = mock(AclStatus.class);
-TreePath treePath = new TreePath(rootFileStatus, 1, null, null, acls);
-
-UGIResolver ugiResolver = mock(UGIResolver.class);
-when(ugiResolver.getPermissionsProto(null, acls)).thenReturn(1L);
-treePath.toINode(ugiResolver, blockResolver, null);
+assertThrows(UnsupportedOperationException.class, () -> {
+BlockResolver blockResolver = new FixedBlockResolver();
+Path root = new Path("/");
+FileStatus rootFileStatus = new FileStatus(0, false, 0, 0, 1, root);
+AclStatus acls = mock(AclStatus.class);
+TreePath treePath = new TreePath(rootFileStatus, 1, null, null, acls);
+UGIResolver ugiResolver = mock(UGIResolver.class);
+when(ugiResolver.getPermissionsProto(null, acls)).thenReturn(1L);
+treePath.toINode(ugiResolver, blockResolver, null);
+});
}
}
@@ -25,22 +25,26 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;

-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
-import static org.junit.Assert.*;
+import org.apache.hadoop.test.TestName;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.RegisterExtension;

+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;

/**
* Validate fixed-size block partitioning.
*/
public class TestFixedBlockResolver {

-@Rule public TestName name = new TestName();
+@RegisterExtension
+private TestName name = new TestName();

private final FixedBlockResolver blockId = new FixedBlockResolver();

-@Before
+@BeforeEach
public void setup() {
Configuration conf = new Configuration(false);
conf.setLong(FixedBlockResolver.BLOCKSIZE, 512L * (1L << 20));
@@ -25,24 +25,26 @@

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.test.TestName;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.RegisterExtension;

-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;

/**
* Validate randomly generated hierarchies, including fork() support in
* base class.
*/
public class TestRandomTreeWalk {

-@Rule public TestName name = new TestName();
+@RegisterExtension
+private TestName name = new TestName();

private Random r = new Random();

-@Before
+@BeforeEach
public void setSeed() {
long seed = r.nextLong();
r.setSeed(seed);