HADOOP-19436. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-gridmix. #7578

Merged 4 commits on Apr 9, 2025
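
The diff below touches two gridmix test files. The change is mechanical: the org.junit.Assert static imports and org.junit.Test are replaced with their org.junit.jupiter.api equivalents, and every assertion that carries a failure message moves that message from the first argument (JUnit 4) to the last argument (JUnit 5). In each hunk the old JUnit 4 line appears directly above its JUnit 5 replacement. As a minimal sketch of the pattern, not taken from this PR, with a made-up class name and assuming junit-jupiter is on the test classpath:

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.junit.jupiter.api.Test;

// Hypothetical example class, not part of hadoop-gridmix.
class AssertionMessageOrderExample {

  @Test
  void messageMovesToTheLastParameter() {
    int expected = 4;
    int actual = 2 + 2;

    // JUnit 4 (org.junit.Assert): the failure message comes first.
    //   assertEquals("Bad job count", expected, actual);
    //   assertTrue("Original job name is wrong.", expected == actual);

    // JUnit 5 (org.junit.jupiter.api.Assertions): the failure message comes last.
    assertEquals(expected, actual, "Bad job count");
    assertTrue(expected == actual, "Original job name is wrong.");
  }
}
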
@@ -17,10 +17,10 @@
*/
package org.apache.hadoop.mapred.gridmix;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;

import java.io.File;
import java.io.IOException;
@@ -114,10 +114,10 @@ protected void onFailure(Job job) {
}

public void verify(ArrayList<JobStory> submitted) throws Exception {
assertEquals("Bad job count", expected, retiredJobs.size());
assertEquals(expected, retiredJobs.size(), "Bad job count");

final ArrayList<Job> succeeded = new ArrayList<Job>();
assertEquals("Bad job count", expected, retiredJobs.drainTo(succeeded));
assertEquals(expected, retiredJobs.drainTo(succeeded), "Bad job count");
final HashMap<String, JobStory> sub = new HashMap<String, JobStory>();
for (JobStory spec : submitted) {
sub.put(spec.getJobID().toString(), spec);
@@ -152,13 +152,13 @@ public void verify(ArrayList<JobStory> submitted) throws Exception {

final String originalJobId = configuration.get(Gridmix.ORIGINAL_JOB_ID);
final JobStory spec = sub.get(originalJobId);
assertNotNull("No spec for " + jobName, spec);
assertNotNull("No counters for " + jobName, job.getCounters());
assertNotNull(spec, "No spec for " + jobName);
assertNotNull(job.getCounters(), "No counters for " + jobName);
final String originalJobName = spec.getName();
System.out.println("originalJobName=" + originalJobName
+ ";GridmixJobName=" + jobName + ";originalJobID=" + originalJobId);
assertTrue("Original job name is wrong.",
originalJobName.equals(configuration.get(Gridmix.ORIGINAL_JOB_NAME)));
assertTrue(originalJobName.equals(configuration.get(Gridmix.ORIGINAL_JOB_NAME)),
"Original job name is wrong.");

// Gridmix job seqNum contains 6 digits
int seqNumLength = 6;
@@ -169,25 +169,24 @@
assertTrue(originalJobName.substring(
originalJobName.length() - seqNumLength).equals(jobSeqNum));

assertTrue("Gridmix job name is not in the expected format.",
jobName.equals(GridmixJob.JOB_NAME_PREFIX + jobSeqNum));
assertTrue(jobName.equals(GridmixJob.JOB_NAME_PREFIX + jobSeqNum),
"Gridmix job name is not in the expected format.");
final FileStatus stat = GridmixTestUtils.dfs.getFileStatus(new Path(
GridmixTestUtils.DEST, "" + Integer.parseInt(jobSeqNum)));
assertEquals("Wrong owner for " + jobName, spec.getUser(),
stat.getOwner());
assertEquals(spec.getUser(), stat.getOwner(), "Wrong owner for " + jobName);
final int nMaps = spec.getNumberMaps();
final int nReds = spec.getNumberReduces();

final JobClient client = new JobClient(
GridmixTestUtils.mrvl.getConfig());
final TaskReport[] mReports = client.getMapTaskReports(JobID
.downgrade(job.getJobID()));
assertEquals("Mismatched map count", nMaps, mReports.length);
assertEquals(nMaps, mReports.length, "Mismatched map count");
check(TaskType.MAP, spec, mReports, 0, 0, SLOPBYTES, nReds);

final TaskReport[] rReports = client.getReduceTaskReports(JobID
.downgrade(job.getJobID()));
assertEquals("Mismatched reduce count", nReds, rReports.length);
assertEquals(nReds, rReports.length, "Mismatched reduce count");
check(TaskType.REDUCE, spec, rReports, nMaps * SLOPBYTES, 2 * nMaps, 0,
0);

@@ -273,43 +272,37 @@ private void check(final TaskType type, JobStory spec,
Arrays.sort(specInputBytes);
Arrays.sort(runInputBytes);
for (int i = 0; i < runTasks.length; ++i) {
assertTrue("Mismatched " + type + " input bytes " + specInputBytes[i]
+ "/" + runInputBytes[i],
eqPlusMinus(runInputBytes[i], specInputBytes[i], extraInputBytes));
assertTrue(eqPlusMinus(runInputBytes[i], specInputBytes[i], extraInputBytes),
"Mismatched " + type + " input bytes " + specInputBytes[i]
+ "/" + runInputBytes[i]);
}

// Check input records
Arrays.sort(specInputRecords);
Arrays.sort(runInputRecords);
for (int i = 0; i < runTasks.length; ++i) {
assertTrue(
"Mismatched " + type + " input records " + specInputRecords[i]
+ "/" + runInputRecords[i],
eqPlusMinus(runInputRecords[i], specInputRecords[i],
extraInputRecords));
assertTrue(eqPlusMinus(runInputRecords[i], specInputRecords[i],
extraInputRecords), "Mismatched " + type + " input records " + specInputRecords[i]
+ "/" + runInputRecords[i]);
}

// Check output bytes
Arrays.sort(specOutputBytes);
Arrays.sort(runOutputBytes);
for (int i = 0; i < runTasks.length; ++i) {
assertTrue(
"Mismatched " + type + " output bytes " + specOutputBytes[i] + "/"
+ runOutputBytes[i],
eqPlusMinus(runOutputBytes[i], specOutputBytes[i], extraOutputBytes));
assertTrue(eqPlusMinus(runOutputBytes[i], specOutputBytes[i], extraOutputBytes),
"Mismatched " + type + " output bytes " + specOutputBytes[i] + "/"
+ runOutputBytes[i]);
}

// Check output records
Arrays.sort(specOutputRecords);
Arrays.sort(runOutputRecords);
for (int i = 0; i < runTasks.length; ++i) {
assertTrue(
"Mismatched " + type + " output records " + specOutputRecords[i]
+ "/" + runOutputRecords[i],
eqPlusMinus(runOutputRecords[i], specOutputRecords[i],
extraOutputRecords));
assertTrue(eqPlusMinus(runOutputRecords[i], specOutputRecords[i],
extraOutputRecords), "Mismatched " + type + " output records " + specOutputRecords[i]
+ "/" + runOutputRecords[i]);
}

}

private static boolean eqPlusMinus(long a, long b, long x) {
@@ -372,7 +365,7 @@ protected void doSubmission(String jobCreatorName, boolean defaultOutputPath)
GridmixTestUtils.dfs.setPermission(root, new FsPermission((short) 777));

int res = ToolRunner.run(conf, client, argv);
assertEquals("Client exited with nonzero status", 0, res);
assertEquals(0, res, "Client exited with nonzero status");
client.checkMonitor();
} catch (Exception e) {
e.printStackTrace();
(second changed file)

@@ -47,8 +47,10 @@
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import static org.junit.Assert.*;
import org.junit.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import org.junit.jupiter.api.Test;

/**
* Test {@link CompressionEmulationUtil}
@@ -169,7 +171,7 @@ private static void runDataGenJob(Configuration conf, Path tempDir)
job.submit();
int ret = job.waitForCompletion(true) ? 0 : 1;

assertEquals("Job Failed", 0, ret);
assertEquals(0, ret, "Job Failed");
}

/**
@@ -260,7 +262,7 @@ public void testCompressionRatios() throws Exception {
} catch (RuntimeException re) {
failed = true;
}
assertTrue("Compression ratio min value (0.07) check failed!", failed);
assertTrue(failed, "Compression ratio min value (0.07) check failed!");

// test with a compression ratio of 0.01 which less than the max supported
// value of 0.68
@@ -270,7 +272,7 @@
} catch (RuntimeException re) {
failed = true;
}
assertTrue("Compression ratio max value (0.68) check failed!", failed);
assertTrue(failed, "Compression ratio max value (0.68) check failed!");
}

/**
@@ -380,10 +382,10 @@ public void testCompressibleGridmixRecord() throws IOException {
GridmixRecord recordRead = new GridmixRecord();
recordRead.readFields(new DataInputStream(in));

assertEquals("Record size mismatch in a compressible GridmixRecord",
dataSize, recordRead.getSize());
assertTrue("Failed to generate a compressible GridmixRecord",
recordRead.getSize() > compressedFileSize);
assertEquals(dataSize, recordRead.getSize(),
"Record size mismatch in a compressible GridmixRecord");
assertTrue(recordRead.getSize() > compressedFileSize,
"Failed to generate a compressible GridmixRecord");

// check if the record can generate data with the desired compression ratio
float seenRatio = ((float)compressedFileSize)/dataSize;
@@ -456,7 +458,7 @@ public void testPossiblyCompressedDecompressedStreams() throws IOException {
.getPossiblyDecompressedInputStream(compressedFile, conf, 0);
BufferedReader reader = new BufferedReader(new InputStreamReader(in));
String readLine = reader.readLine();
assertEquals("Compression/Decompression error", inputLine, readLine);
assertEquals(inputLine, readLine, "Compression/Decompression error");
reader.close();
}

@@ -555,7 +557,7 @@ public void testFileQueueDecompression() throws IOException {
queue.read(bytes);
queue.close();
String readLine = new String(bytes);
assertEquals("Compression/Decompression error", inputLine, readLine);
assertEquals(inputLine, readLine, "Compression/Decompression error");
}

/**
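
Not part of this change: the try/catch-and-flag checks in testCompressionRatios work unchanged under JUnit 5, so the diff only reorders their assertion arguments. If that pattern were ever rewritten, the Jupiter idiom would be Assertions.assertThrows. A sketch under that assumption, with a made-up validator method standing in for the gridmix ratio check:

import static org.junit.jupiter.api.Assertions.assertThrows;

import org.junit.jupiter.api.Test;

// Hypothetical example, not code from this PR.
class ExpectedExceptionExample {

  // Stand-in for the compression-ratio validation; the real gridmix method
  // name is not shown in this diff, so this helper is made up.
  private static void validateRatio(float ratio) {
    if (ratio < 0.07F || ratio > 0.68F) {
      throw new RuntimeException("ratio out of range: " + ratio);
    }
  }

  @Test
  void rejectsOutOfRangeRatio() {
    // assertThrows replaces the manual try/catch plus boolean flag.
    assertThrows(RuntimeException.class, () -> validateRatio(0.01F));
  }
}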